author    Niall Sheridan <nsheridan@gmail.com>  2016-06-06 14:44:38 +0100
committer Niall Sheridan <nsheridan@gmail.com>  2016-06-06 14:44:38 +0100
commit    067ad51b6a6ee8829612f51a6e6b2ade3eaa61b3 (patch)
tree      301d2a7303e2b5db0c3fa1911d799e16f921d9dc
parent    5ca59399fe0880368f88d17c0c9d781d836969c6 (diff)
parent    a18a13fb09eac00cdacf1f74080524182b7243de (diff)
Merge pull request #17 from nsheridan/vendor
update dependencies
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go | 145
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go | 194
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go | 100
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go | 27
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go | 222
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go | 107
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go | 89
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/client/client.go | 120
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go | 90
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go | 12
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/config.go | 358
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/convert_types.go | 369
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go | 152
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go | 17
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go | 100
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go | 223
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go | 178
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go | 77
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini | 12
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go | 151
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go | 48
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go | 98
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go | 140
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go | 124
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/errors.go | 17
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/logger.go | 112
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go | 187
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go | 33
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go | 31
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go | 49
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/request.go | 329
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go | 104
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go | 101
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/validation.go | 234
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/session/session.go | 120
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/types.go | 106
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/version.go | 8
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go | 65
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json | 75
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go | 88
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go | 75
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go | 36
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go | 230
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go | 35
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go | 66
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go | 256
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go | 45
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go | 198
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go | 69
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go | 21
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go | 293
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go | 260
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go | 105
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go | 82
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go | 465
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go | 134
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/api.go | 8187
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go | 43
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go | 36
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go | 46
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go | 165
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go | 8
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go | 28
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/service.go | 86
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/sse.go | 44
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go | 36
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go | 59
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go | 123
-rw-r--r--  vendor/github.com/go-ini/ini/LICENSE | 191
-rw-r--r--  vendor/github.com/go-ini/ini/README.md | 560
-rw-r--r--  vendor/github.com/go-ini/ini/README_ZH.md | 547
-rw-r--r--  vendor/github.com/go-ini/ini/ini.go | 1226
-rw-r--r--  vendor/github.com/go-ini/ini/struct.go | 350
-rw-r--r--  vendor/github.com/golang/protobuf/LICENSE | 31
-rw-r--r--  vendor/github.com/golang/protobuf/proto/Makefile | 43
-rw-r--r--  vendor/github.com/golang/protobuf/proto/clone.go | 223
-rw-r--r--  vendor/github.com/golang/protobuf/proto/decode.go | 868
-rw-r--r--  vendor/github.com/golang/protobuf/proto/encode.go | 1331
-rw-r--r--  vendor/github.com/golang/protobuf/proto/equal.go | 276
-rw-r--r--  vendor/github.com/golang/protobuf/proto/extensions.go | 399
-rw-r--r--  vendor/github.com/golang/protobuf/proto/lib.go | 894
-rw-r--r--  vendor/github.com/golang/protobuf/proto/message_set.go | 280
-rw-r--r--  vendor/github.com/golang/protobuf/proto/pointer_reflect.go | 479
-rw-r--r--  vendor/github.com/golang/protobuf/proto/pointer_unsafe.go | 266
-rw-r--r--  vendor/github.com/golang/protobuf/proto/properties.go | 846
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text.go | 849
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text_parser.go | 871
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/LICENSE | 13
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/Makefile | 44
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/README.md | 7
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/api.go | 49
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/astnodetype_string.go | 16
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/functions.go | 842
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/interpreter.go | 418
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/lexer.go | 420
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/parser.go | 603
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/toktype_string.go | 16
-rw-r--r--  vendor/github.com/jmespath/go-jmespath/util.go | 185
-rw-r--r--  vendor/go4.org/LICENSE | 202
-rw-r--r--  vendor/go4.org/wkfs/gcs/gcs.go | 196
-rw-r--r--  vendor/go4.org/wkfs/wkfs.go | 132
-rw-r--r--  vendor/golang.org/x/net/http2/Dockerfile | 51
-rw-r--r--  vendor/golang.org/x/net/http2/Makefile | 3
-rw-r--r--  vendor/golang.org/x/net/http2/README | 20
-rw-r--r--  vendor/golang.org/x/net/http2/client_conn_pool.go | 246
-rw-r--r--  vendor/golang.org/x/net/http2/configure_transport.go | 80
-rw-r--r--  vendor/golang.org/x/net/http2/errors.go | 122
-rw-r--r--  vendor/golang.org/x/net/http2/fixed_buffer.go | 60
-rw-r--r--  vendor/golang.org/x/net/http2/flow.go | 50
-rw-r--r--  vendor/golang.org/x/net/http2/frame.go | 1507
-rw-r--r--  vendor/golang.org/x/net/http2/go16.go | 16
-rw-r--r--  vendor/golang.org/x/net/http2/go17.go | 94
-rw-r--r--  vendor/golang.org/x/net/http2/gotrack.go | 170
-rw-r--r--  vendor/golang.org/x/net/http2/headermap.go | 78
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/encode.go | 251
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/hpack.go | 542
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/huffman.go | 212
-rw-r--r--  vendor/golang.org/x/net/http2/hpack/tables.go | 352
-rw-r--r--  vendor/golang.org/x/net/http2/http2.go | 351
-rw-r--r--  vendor/golang.org/x/net/http2/not_go16.go | 20
-rw-r--r--  vendor/golang.org/x/net/http2/not_go17.go | 51
-rw-r--r--  vendor/golang.org/x/net/http2/pipe.go | 147
-rw-r--r--  vendor/golang.org/x/net/http2/server.go | 2287
-rw-r--r--  vendor/golang.org/x/net/http2/transport.go | 1816
-rw-r--r--  vendor/golang.org/x/net/http2/write.go | 264
-rw-r--r--  vendor/golang.org/x/net/http2/writesched.go | 283
-rw-r--r--  vendor/golang.org/x/net/internal/timeseries/timeseries.go | 525
-rw-r--r--  vendor/golang.org/x/net/lex/httplex/httplex.go | 312
-rw-r--r--  vendor/golang.org/x/net/trace/events.go | 524
-rw-r--r--  vendor/golang.org/x/net/trace/histogram.go | 356
-rw-r--r--  vendor/golang.org/x/net/trace/trace.go | 1063
-rw-r--r--  vendor/google.golang.org/api/storage/v1/storage-api.json | 2861
-rw-r--r--  vendor/google.golang.org/api/storage/v1/storage-gen.go | 7740
-rw-r--r--  vendor/google.golang.org/appengine/LICENSE | 202
-rw-r--r--  vendor/google.golang.org/appengine/README.md | 71
-rw-r--r--  vendor/google.golang.org/appengine/appengine.go | 76
-rw-r--r--  vendor/google.golang.org/appengine/appengine_vm.go | 56
-rw-r--r--  vendor/google.golang.org/appengine/errors.go | 46
-rw-r--r--  vendor/google.golang.org/appengine/identity.go | 142
-rw-r--r--  vendor/google.golang.org/appengine/internal/api.go | 509
-rw-r--r--  vendor/google.golang.org/appengine/internal/api_classic.go | 159
-rw-r--r--  vendor/google.golang.org/appengine/internal/api_common.go | 86
-rw-r--r--  vendor/google.golang.org/appengine/internal/app_id.go | 28
-rw-r--r--  vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go | 296
-rw-r--r--  vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto | 64
-rw-r--r--  vendor/google.golang.org/appengine/internal/base/api_base.pb.go | 133
-rw-r--r--  vendor/google.golang.org/appengine/internal/base/api_base.proto | 33
-rw-r--r--  vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go | 2778
-rwxr-xr-x  vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto | 541
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity.go | 14
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity_classic.go | 27
-rw-r--r--  vendor/google.golang.org/appengine/internal/identity_vm.go | 97
-rw-r--r--  vendor/google.golang.org/appengine/internal/internal.go | 144
-rw-r--r--  vendor/google.golang.org/appengine/internal/log_vm.go | 109
-rw-r--r--  vendor/google.golang.org/appengine/internal/metadata.go | 61
-rw-r--r--  vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go | 375
-rw-r--r--  vendor/google.golang.org/appengine/internal/modules/modules_service.proto | 80
-rw-r--r--  vendor/google.golang.org/appengine/internal/net.go | 56
-rwxr-xr-x  vendor/google.golang.org/appengine/internal/regen.sh | 40
-rw-r--r--  vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go | 231
-rw-r--r--  vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto | 44
-rw-r--r--  vendor/google.golang.org/appengine/internal/transaction.go | 107
-rw-r--r--  vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go | 355
-rw-r--r--  vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto | 64
-rw-r--r--  vendor/google.golang.org/appengine/namespace.go | 25
-rw-r--r--  vendor/google.golang.org/appengine/timeout.go | 20
-rw-r--r--  vendor/google.golang.org/appengine/urlfetch/urlfetch.go | 210
-rw-r--r--  vendor/google.golang.org/cloud/AUTHORS | 14
-rw-r--r--  vendor/google.golang.org/cloud/CONTRIBUTING.md | 115
-rw-r--r--  vendor/google.golang.org/cloud/CONTRIBUTORS | 29
-rw-r--r--  vendor/google.golang.org/cloud/README.md | 186
-rw-r--r--  vendor/google.golang.org/cloud/cloud.go | 56
-rw-r--r--  vendor/google.golang.org/cloud/internal/opts/option.go | 25
-rw-r--r--  vendor/google.golang.org/cloud/internal/transport/dial.go | 101
-rw-r--r--  vendor/google.golang.org/cloud/key.json.enc | bin 0 -> 1248 bytes
-rw-r--r--  vendor/google.golang.org/cloud/option.go | 114
-rw-r--r--  vendor/google.golang.org/cloud/storage/acl.go | 204
-rw-r--r--  vendor/google.golang.org/cloud/storage/reader.go | 55
-rw-r--r--  vendor/google.golang.org/cloud/storage/storage.go | 1026
-rw-r--r--  vendor/google.golang.org/cloud/storage/writer.go | 129
-rw-r--r--  vendor/google.golang.org/grpc/CONTRIBUTING.md | 46
-rw-r--r--  vendor/google.golang.org/grpc/LICENSE | 28
-rw-r--r--  vendor/google.golang.org/grpc/Makefile | 51
-rw-r--r--  vendor/google.golang.org/grpc/PATENTS | 22
-rw-r--r--  vendor/google.golang.org/grpc/README.md | 32
-rw-r--r--  vendor/google.golang.org/grpc/backoff.go | 80
-rw-r--r--  vendor/google.golang.org/grpc/balancer.go | 340
-rw-r--r--  vendor/google.golang.org/grpc/call.go | 215
-rw-r--r--  vendor/google.golang.org/grpc/clientconn.go | 724
-rwxr-xr-x  vendor/google.golang.org/grpc/codegen.sh | 17
-rw-r--r--  vendor/google.golang.org/grpc/codes/code_string.go | 16
-rw-r--r--  vendor/google.golang.org/grpc/codes/codes.go | 159
-rwxr-xr-x  vendor/google.golang.org/grpc/coverage.sh | 47
-rw-r--r--  vendor/google.golang.org/grpc/credentials/credentials.go | 226
-rw-r--r--  vendor/google.golang.org/grpc/credentials/oauth/oauth.go | 177
-rw-r--r--  vendor/google.golang.org/grpc/doc.go | 6
-rw-r--r--  vendor/google.golang.org/grpc/grpclog/logger.go | 93
-rw-r--r--  vendor/google.golang.org/grpc/interceptor.go | 74
-rw-r--r--  vendor/google.golang.org/grpc/internal/internal.go | 49
-rw-r--r--  vendor/google.golang.org/grpc/metadata/metadata.go | 134
-rw-r--r--  vendor/google.golang.org/grpc/naming/naming.go | 74
-rw-r--r--  vendor/google.golang.org/grpc/peer/peer.go | 65
-rw-r--r--  vendor/google.golang.org/grpc/rpc_util.go | 415
-rw-r--r--  vendor/google.golang.org/grpc/server.go | 787
-rw-r--r--  vendor/google.golang.org/grpc/stream.go | 424
-rw-r--r--  vendor/google.golang.org/grpc/trace.go | 119
-rw-r--r--  vendor/google.golang.org/grpc/transport/control.go | 210
-rw-r--r--  vendor/google.golang.org/grpc/transport/handler_server.go | 393
-rw-r--r--  vendor/google.golang.org/grpc/transport/http2_client.go | 953
-rw-r--r--  vendor/google.golang.org/grpc/transport/http2_server.go | 743
-rw-r--r--  vendor/google.golang.org/grpc/transport/http_util.go | 422
-rw-r--r--  vendor/google.golang.org/grpc/transport/transport.go | 513
-rw-r--r--  vendor/vendor.json | 338
215 files changed, 69700 insertions, 0 deletions
diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
new file mode 100644
index 0000000..5f14d11
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt
@@ -0,0 +1,3 @@
+AWS SDK for Go
+Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+Copyright 2014-2015 Stripe, Inc.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
new file mode 100644
index 0000000..e50771f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go
@@ -0,0 +1,145 @@
+// Package awserr provides API error interface accessors for the SDK.
+package awserr
+
+// An Error wraps lower level errors with code, message and an original error.
+// The underlying concrete error type may also satisfy other interfaces which
+// can be used to obtain more specific information about the error.
+//
+// Calling Error() or String() will always include the full information about
+// an error based on its underlying type.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Get error details
+// log.Println("Error:", awsErr.Code(), awsErr.Message())
+//
+// // Prints out full error message, including original error if there was one.
+// log.Println("Error:", awsErr.Error())
+//
+// // Get original error
+// if origErr := awsErr.OrigErr(); origErr != nil {
+// // operate on original error.
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type Error interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErr() error
+}
+
+// BatchError is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Deprecated: Replaced with BatchedErrors. Only defined for backwards
+// compatibility.
+type BatchError interface {
+ // Satisfy the generic error interface.
+ error
+
+ // Returns the short phrase depicting the classification of the error.
+ Code() string
+
+ // Returns the error details message.
+ Message() string
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErrs() []error
+}
+
+// BatchedErrors is a batch of errors which also wraps lower level errors with
+// code, message, and original errors. Calling Error() will include all errors
+// that occurred in the batch.
+//
+// Replaces BatchError
+type BatchedErrors interface {
+ // Satisfy the base Error interface.
+ Error
+
+ // Returns the original error if one was set. Nil is returned if not set.
+ OrigErrs() []error
+}
+
+// New returns an Error object described by the code, message, and origErr.
+//
+// If origErr satisfies the Error interface it will not be wrapped within a new
+// Error object and will instead be returned.
+func New(code, message string, origErr error) Error {
+ var errs []error
+ if origErr != nil {
+ errs = append(errs, origErr)
+ }
+ return newBaseError(code, message, errs)
+}
+
+// NewBatchError returns a BatchedErrors with a collection of errors as an
+// array of errors.
+func NewBatchError(code, message string, errs []error) BatchedErrors {
+ return newBaseError(code, message, errs)
+}
+
+// A RequestFailure is an interface to extract request failure information from
+// an Error such as the request ID of the failed request returned by a service.
+// RequestFailures may not always have a requestID value if the request failed
+// before reaching the service, such as on a connection error.
+//
+// Example:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if reqerr, ok := err.(RequestFailure); ok {
+// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID())
+// } else {
+// log.Println("Error:", err.Error())
+// }
+// }
+//
+// Combined with awserr.Error:
+//
+// output, err := s3manage.Upload(svc, input, opts)
+// if err != nil {
+// if awsErr, ok := err.(awserr.Error); ok {
+// // Generic AWS Error with Code, Message, and original error (if any)
+// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr())
+//
+// if reqErr, ok := err.(awserr.RequestFailure); ok {
+// // A service error occurred
+// fmt.Println(reqErr.StatusCode(), reqErr.RequestID())
+// }
+// } else {
+// fmt.Println(err.Error())
+// }
+// }
+//
+type RequestFailure interface {
+ Error
+
+ // The status code of the HTTP response.
+ StatusCode() int
+
+ // The request ID returned by the service for a request failure. This will
+ // be empty if no request ID is available, such as when the request failed
+ // due to a connection error.
+ RequestID() string
+}
+
+// NewRequestFailure returns a new request error wrapper for the given Error
+// provided.
+func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure {
+ return newRequestError(err, statusCode, reqID)
+}
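
The doc comments above describe inspecting SDK errors through type assertions. As a minimal, self-contained sketch of that pattern (the error code, message, status code, and request ID below are illustrative values, not ones produced by the SDK):

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// Build a wrapped error the way a service client would on a failed call.
	var err error = awserr.NewRequestFailure(
		awserr.New("RequestError", "send request failed", errors.New("connection reset")),
		502, "REQ-12345")

	// Generic accessors available on any awserr.Error.
	if awsErr, ok := err.(awserr.Error); ok {
		fmt.Println("code:", awsErr.Code(), "message:", awsErr.Message())

		// Additional accessors when the failure carries HTTP metadata.
		if reqErr, ok := err.(awserr.RequestFailure); ok {
			fmt.Println("status:", reqErr.StatusCode(), "request id:", reqErr.RequestID())
		}
	}
}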
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
new file mode 100644
index 0000000..e2d333b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go
@@ -0,0 +1,194 @@
+package awserr
+
+import "fmt"
+
+// SprintError returns the formatted error as a string.
+//
+// Both extra and origErr are optional. When set, their lines are appended to
+// the message; when empty or nil they are omitted.
+func SprintError(code, message, extra string, origErr error) string {
+ msg := fmt.Sprintf("%s: %s", code, message)
+ if extra != "" {
+ msg = fmt.Sprintf("%s\n\t%s", msg, extra)
+ }
+ if origErr != nil {
+ msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error())
+ }
+ return msg
+}
+
+// A baseError wraps the code and message which defines an error. It also
+// can be used to wrap an original error object.
+//
+// Should be used as the root for errors satisfying the awserr.Error
+// interface, and for any error which does not fit a specific wrapper type.
+type baseError struct {
+ // Classification of error
+ code string
+
+ // Detailed information about error
+ message string
+
+ // Optional original error this error is based off of. Allows building
+ // chained errors.
+ errs []error
+}
+
+// newBaseError returns an error object for the code, message, and errors.
+//
+// code is a short, whitespace-free phrase depicting the classification of
+// the error that is being created.
+//
+// message is a free-form string containing detailed information about the
+// error.
+//
+// origErrs contains the error objects which will be nested under the new
+// error to be returned.
+func newBaseError(code, message string, origErrs []error) *baseError {
+ b := &baseError{
+ code: code,
+ message: message,
+ errs: origErrs,
+ }
+
+ return b
+}
+
+// Error returns the string representation of the error.
+//
+// See SprintError for formatting.
+//
+// Satisfies the error interface.
+func (b baseError) Error() string {
+ size := len(b.errs)
+ if size > 0 {
+ return SprintError(b.code, b.message, "", errorList(b.errs))
+ }
+
+ return SprintError(b.code, b.message, "", nil)
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (b baseError) String() string {
+ return b.Error()
+}
+
+// Code returns the short phrase depicting the classification of the error.
+func (b baseError) Code() string {
+ return b.code
+}
+
+// Message returns the error details message.
+func (b baseError) Message() string {
+ return b.message
+}
+
+// OrigErr returns the original error if one was set. Nil is returned if no
+// error was set. This only returns the first element in the list. If the full
+// list is needed, use BatchedErrors.
+func (b baseError) OrigErr() error {
+ switch len(b.errs) {
+ case 0:
+ return nil
+ case 1:
+ return b.errs[0]
+ default:
+ if err, ok := b.errs[0].(Error); ok {
+ return NewBatchError(err.Code(), err.Message(), b.errs[1:])
+ }
+ return NewBatchError("BatchedErrors",
+ "multiple errors occured", b.errs)
+ }
+}
+
+// OrigErrs returns the original errors if any were set. An empty slice is
+// returned if no errors were set.
+func (b baseError) OrigErrs() []error {
+ return b.errs
+}
+
+// So that the Error interface type can be included as an anonymous field
+// in the requestError struct and not conflict with the error.Error() method.
+type awsError Error
+
+// A requestError wraps a request or service error.
+//
+// Composed of baseError for code, message, and original error.
+type requestError struct {
+ awsError
+ statusCode int
+ requestID string
+}
+
+// newRequestError returns a wrapped error with additional information for
+// request status code, and service requestID.
+//
+// Should be used to wrap all errors arising from service requests, even if
+// the request failed without a service response but had an HTTP status code
+// that may be meaningful.
+//
+// Also wraps original errors via the baseError.
+func newRequestError(err Error, statusCode int, requestID string) *requestError {
+ return &requestError{
+ awsError: err,
+ statusCode: statusCode,
+ requestID: requestID,
+ }
+}
+
+// Error returns the string representation of the error.
+// Satisfies the error interface.
+func (r requestError) Error() string {
+ extra := fmt.Sprintf("status code: %d, request id: %s",
+ r.statusCode, r.requestID)
+ return SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+
+// String returns the string representation of the error.
+// Alias for Error to satisfy the stringer interface.
+func (r requestError) String() string {
+ return r.Error()
+}
+
+// StatusCode returns the wrapped status code for the error
+func (r requestError) StatusCode() int {
+ return r.statusCode
+}
+
+// RequestID returns the wrapped requestID
+func (r requestError) RequestID() string {
+ return r.requestID
+}
+
+// OrigErrs returns the original errors if any were set. An empty slice is
+// returned if no errors were set.
+func (r requestError) OrigErrs() []error {
+ if b, ok := r.awsError.(BatchedErrors); ok {
+ return b.OrigErrs()
+ }
+ return []error{r.OrigErr()}
+}
+
+// An error list that satisfies the golang interface
+type errorList []error
+
+// Error returns the string representation of the error.
+//
+// Satisfies the error interface.
+func (e errorList) Error() string {
+ msg := ""
+ // How do we want to handle the array size being zero
+ if size := len(e); size > 0 {
+ for i := 0; i < size; i++ {
+ msg += fmt.Sprintf("%s", e[i].Error())
+ // We check the next index to see if it is within the slice.
+ // If it is, then we append a newline. We do this because unit tests
+ // could be broken with the additional '\n'
+ if i+1 < size {
+ msg += "\n"
+ }
+ }
+ }
+ return msg
+}
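
A short sketch of how baseError batches and renders wrapped errors (the codes and messages are made up): OrigErrs exposes every wrapped error, while Error() joins the causes with newlines via errorList.

package main

import (
	"errors"
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	batch := awserr.NewBatchError("MultiSend", "two sends failed", []error{
		errors.New("timeout on shard 1"),
		errors.New("timeout on shard 2"),
	})

	fmt.Println(len(batch.OrigErrs())) // 2

	// Prints the code and message, then both causes joined by newlines:
	// MultiSend: two sends failed
	// caused by: timeout on shard 1
	// timeout on shard 2
	fmt.Println(batch.Error())
}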
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
new file mode 100644
index 0000000..8429470
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
@@ -0,0 +1,100 @@
+package awsutil
+
+import (
+ "io"
+ "reflect"
+)
+
+// Copy deeply copies a src structure to dst. Useful for copying request and
+// response structures.
+//
+// Can copy between structs of different type, but will only copy fields which
+// are assignable, and exist in both structs. Fields which are not assignable,
+// or do not exist in both structs are ignored.
+func Copy(dst, src interface{}) {
+ dstval := reflect.ValueOf(dst)
+ if !dstval.IsValid() {
+ panic("Copy dst cannot be nil")
+ }
+
+ rcopy(dstval, reflect.ValueOf(src), true)
+}
+
+// CopyOf returns a copy of src while also allocating the memory for dst.
+// src must be a pointer type or this operation will fail.
+func CopyOf(src interface{}) (dst interface{}) {
+ dsti := reflect.New(reflect.TypeOf(src).Elem())
+ dst = dsti.Interface()
+ rcopy(dsti, reflect.ValueOf(src), true)
+ return
+}
+
+// rcopy performs a recursive copy of values from the source to destination.
+//
+// root is used to skip certain aspects of the copy which are not valid
+// for the root node of an object.
+func rcopy(dst, src reflect.Value, root bool) {
+ if !src.IsValid() {
+ return
+ }
+
+ switch src.Kind() {
+ case reflect.Ptr:
+ if _, ok := src.Interface().(io.Reader); ok {
+ if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() {
+ dst.Elem().Set(src)
+ } else if dst.CanSet() {
+ dst.Set(src)
+ }
+ } else {
+ e := src.Type().Elem()
+ if dst.CanSet() && !src.IsNil() {
+ dst.Set(reflect.New(e))
+ }
+ if src.Elem().IsValid() {
+ // Keep the current root state since the depth hasn't changed
+ rcopy(dst.Elem(), src.Elem(), root)
+ }
+ }
+ case reflect.Struct:
+ t := dst.Type()
+ for i := 0; i < t.NumField(); i++ {
+ name := t.Field(i).Name
+ srcVal := src.FieldByName(name)
+ dstVal := dst.FieldByName(name)
+ if srcVal.IsValid() && dstVal.CanSet() {
+ rcopy(dstVal, srcVal, false)
+ }
+ }
+ case reflect.Slice:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap())
+ dst.Set(s)
+ for i := 0; i < src.Len(); i++ {
+ rcopy(dst.Index(i), src.Index(i), false)
+ }
+ case reflect.Map:
+ if src.IsNil() {
+ break
+ }
+
+ s := reflect.MakeMap(src.Type())
+ dst.Set(s)
+ for _, k := range src.MapKeys() {
+ v := src.MapIndex(k)
+ v2 := reflect.New(v.Type()).Elem()
+ rcopy(v2, v, false)
+ dst.SetMapIndex(k, v2)
+ }
+ default:
+ // Assign the value if possible. If it's not assignable, the value would
+ // need to be converted; the impact of that conversion may be unexpected,
+ // or the value may not be compatible with the dst type.
+ if src.Type().AssignableTo(dst.Type()) {
+ dst.Set(src)
+ }
+ }
+}
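
A small usage sketch of Copy and CopyOf (the Input type here is hypothetical, not an SDK type). The copy is deep, so mutating the source afterwards does not leak into the destination:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type Input struct {
	Bucket *string
	Keys   []string
}

func main() {
	bucket := "my-bucket"
	src := &Input{Bucket: &bucket, Keys: []string{"a", "b"}}

	// CopyOf allocates the destination itself; src must be a pointer.
	dst := awsutil.CopyOf(src).(*Input)
	src.Keys[0] = "changed"
	fmt.Println(dst.Keys[0]) // "a": the slice was deeply copied

	// Copy fills an existing destination in place.
	var dst2 Input
	awsutil.Copy(&dst2, src)
	fmt.Println(*dst2.Bucket) // "my-bucket"
}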
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
new file mode 100644
index 0000000..59fa4a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
@@ -0,0 +1,27 @@
+package awsutil
+
+import (
+ "reflect"
+)
+
+// DeepEqual returns whether the two values are deeply equal, like reflect.DeepEqual.
+// In addition to this, this method will also dereference the input values if
+// possible so the DeepEqual performed will not fail if one parameter is a
+// pointer and the other is not.
+//
+// DeepEqual will not perform indirection of nested values of the input parameters.
+func DeepEqual(a, b interface{}) bool {
+ ra := reflect.Indirect(reflect.ValueOf(a))
+ rb := reflect.Indirect(reflect.ValueOf(b))
+
+ if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid {
+ // If the elements are both nil and of the same type they are equal.
+ // If they are of different types they are not equal
+ return reflect.TypeOf(a) == reflect.TypeOf(b)
+ } else if raValid != rbValid {
+ // Both values must be valid to be equal
+ return false
+ }
+
+ return reflect.DeepEqual(ra.Interface(), rb.Interface())
+}
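
The indirection DeepEqual performs means a pointer and the value it points at compare equal when their contents match, which plain reflect.DeepEqual would reject. A quick sketch:

package main

import (
	"fmt"
	"reflect"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

func main() {
	s := "hello"
	fmt.Println(reflect.DeepEqual(&s, "hello")) // false: *string vs string
	fmt.Println(awsutil.DeepEqual(&s, "hello")) // true: the pointer is dereferenced first

	var p *string
	fmt.Println(awsutil.DeepEqual(p, nil)) // false: (*string)(nil) and untyped nil differ in type
}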
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
new file mode 100644
index 0000000..4d2a01e
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
@@ -0,0 +1,222 @@
+package awsutil
+
+import (
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/jmespath/go-jmespath"
+)
+
+var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`)
+
+// rValuesAtPath returns a slice of values found in value v. The values
+// in v are explored recursively so all nested values are collected.
+func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value {
+ pathparts := strings.Split(path, "||")
+ if len(pathparts) > 1 {
+ for _, pathpart := range pathparts {
+ vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm)
+ if len(vals) > 0 {
+ return vals
+ }
+ }
+ return nil
+ }
+
+ values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))}
+ components := strings.Split(path, ".")
+ for len(values) > 0 && len(components) > 0 {
+ var index *int64
+ var indexStar bool
+ c := strings.TrimSpace(components[0])
+ if c == "" { // no actual component, illegal syntax
+ return nil
+ } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] {
+ // TODO normalize case for user
+ return nil // don't support unexported fields
+ }
+
+ // parse this component
+ if m := indexRe.FindStringSubmatch(c); m != nil {
+ c = m[1]
+ if m[2] == "" {
+ index = nil
+ indexStar = true
+ } else {
+ i, _ := strconv.ParseInt(m[2], 10, 32)
+ index = &i
+ indexStar = false
+ }
+ }
+
+ nextvals := []reflect.Value{}
+ for _, value := range values {
+ // pull component name out of struct member
+ if value.Kind() != reflect.Struct {
+ continue
+ }
+
+ if c == "*" { // pull all members
+ for i := 0; i < value.NumField(); i++ {
+ if f := reflect.Indirect(value.Field(i)); f.IsValid() {
+ nextvals = append(nextvals, f)
+ }
+ }
+ continue
+ }
+
+ value = value.FieldByNameFunc(func(name string) bool {
+ if c == name {
+ return true
+ } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) {
+ return true
+ }
+ return false
+ })
+
+ if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 {
+ if !value.IsNil() {
+ value.Set(reflect.Zero(value.Type()))
+ }
+ return []reflect.Value{value}
+ }
+
+ if createPath && value.Kind() == reflect.Ptr && value.IsNil() {
+ // TODO if the value is the terminus it should not be created
+ // if the value to be set to its position is nil.
+ value.Set(reflect.New(value.Type().Elem()))
+ value = value.Elem()
+ } else {
+ value = reflect.Indirect(value)
+ }
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+
+ if indexStar || index != nil {
+ nextvals = []reflect.Value{}
+ for _, value := range values {
+ value := reflect.Indirect(value)
+ if value.Kind() != reflect.Slice {
+ continue
+ }
+
+ if indexStar { // grab all indices
+ for i := 0; i < value.Len(); i++ {
+ idx := reflect.Indirect(value.Index(i))
+ if idx.IsValid() {
+ nextvals = append(nextvals, idx)
+ }
+ }
+ continue
+ }
+
+ // pull out index
+ i := int(*index)
+ if i >= value.Len() { // check out of bounds
+ if createPath {
+ // TODO resize slice
+ } else {
+ continue
+ }
+ } else if i < 0 { // support negative indexing
+ i = value.Len() + i
+ }
+ value = reflect.Indirect(value.Index(i))
+
+ if value.Kind() == reflect.Slice || value.Kind() == reflect.Map {
+ if !createPath && value.IsNil() {
+ value = reflect.ValueOf(nil)
+ }
+ }
+
+ if value.IsValid() {
+ nextvals = append(nextvals, value)
+ }
+ }
+ values = nextvals
+ }
+
+ components = components[1:]
+ }
+ return values
+}
+
+// ValuesAtPath returns a list of values at the case insensitive lexical
+// path inside of a structure.
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) {
+ result, err := jmespath.Search(path, i)
+ if err != nil {
+ return nil, err
+ }
+
+ v := reflect.ValueOf(result)
+ if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) {
+ return nil, nil
+ }
+ if s, ok := result.([]interface{}); ok {
+ return s, err
+ }
+ if v.Kind() == reflect.Map && v.Len() == 0 {
+ return nil, nil
+ }
+ if v.Kind() == reflect.Slice {
+ out := make([]interface{}, v.Len())
+ for i := 0; i < v.Len(); i++ {
+ out[i] = v.Index(i).Interface()
+ }
+ return out, nil
+ }
+
+ return []interface{}{result}, nil
+}
+
+// SetValueAtPath sets a value at the case insensitive lexical path inside
+// of a structure.
+func SetValueAtPath(i interface{}, path string, v interface{}) {
+ if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil {
+ for _, rval := range rvals {
+ if rval.Kind() == reflect.Ptr && rval.IsNil() {
+ continue
+ }
+ setValue(rval, v)
+ }
+ }
+}
+
+func setValue(dstVal reflect.Value, src interface{}) {
+ if dstVal.Kind() == reflect.Ptr {
+ dstVal = reflect.Indirect(dstVal)
+ }
+ srcVal := reflect.ValueOf(src)
+
+ if !srcVal.IsValid() { // src is literal nil
+ if dstVal.CanAddr() {
+ // Convert to pointer so that pointer's value can be nil'ed
+ // dstVal = dstVal.Addr()
+ }
+ dstVal.Set(reflect.Zero(dstVal.Type()))
+
+ } else if srcVal.Kind() == reflect.Ptr {
+ if srcVal.IsNil() {
+ srcVal = reflect.Zero(dstVal.Type())
+ } else {
+ srcVal = reflect.ValueOf(src).Elem()
+ }
+ dstVal.Set(srcVal)
+ } else {
+ dstVal.Set(srcVal)
+ }
+
+}
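
A hedged sketch of the two exported helpers (Config and Nested are illustrative types, not from the SDK): SetValueAtPath walks the dotted path with createPath=true, allocating nil intermediate pointers along the way, while ValuesAtPath evaluates the path as a JMESPath expression against the struct.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type Nested struct {
	Name *string
}

type Config struct {
	Inner *Nested
}

func main() {
	c := &Config{}

	// Allocates c.Inner, then sets c.Inner.Name to "example".
	awsutil.SetValueAtPath(c, "Inner.Name", "example")

	vals, err := awsutil.ValuesAtPath(c, "Inner.Name")
	if err != nil {
		panic(err)
	}
	fmt.Println(*vals[0].(*string)) // "example"
}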
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
new file mode 100644
index 0000000..fc38172
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
@@ -0,0 +1,107 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "reflect"
+ "strings"
+)
+
+// Prettify returns the string representation of a value.
+func Prettify(i interface{}) string {
+ var buf bytes.Buffer
+ prettify(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+// prettify will recursively walk value v to build a textual
+// representation of the value.
+func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ strtype := v.Type().String()
+ if strtype == "time.Time" {
+ fmt.Fprintf(buf, "%s", v.Interface())
+ break
+ } else if strings.HasPrefix(strtype, "io.") {
+ buf.WriteString("<buffer>")
+ break
+ }
+
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+ prettify(val, indent+2, buf)
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ prettify(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ prettify(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ if !v.IsValid() {
+ fmt.Fprint(buf, "<invalid value>")
+ return
+ }
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ case io.ReadSeeker, io.Reader:
+ format = "buffer(%p)"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
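
As a usage sketch (the Object type is hypothetical): nil pointer, slice, and map fields are skipped, unexported fields are ignored, and strings are quoted.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type Object struct {
	Key  *string
	Size *int64
	ETag *string // left nil below, so omitted from the output
}

func main() {
	key := "photos/cat.png"
	size := int64(1024)
	fmt.Println(awsutil.Prettify(&Object{Key: &key, Size: &size}))
	// {
	//   Key: "photos/cat.png",
	//   Size: 1024
	// }
}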
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
new file mode 100644
index 0000000..b6432f1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
@@ -0,0 +1,89 @@
+package awsutil
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+// StringValue returns the string representation of a value.
+func StringValue(i interface{}) string {
+ var buf bytes.Buffer
+ stringValue(reflect.ValueOf(i), 0, &buf)
+ return buf.String()
+}
+
+func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) {
+ for v.Kind() == reflect.Ptr {
+ v = v.Elem()
+ }
+
+ switch v.Kind() {
+ case reflect.Struct:
+ buf.WriteString("{\n")
+
+ names := []string{}
+ for i := 0; i < v.Type().NumField(); i++ {
+ name := v.Type().Field(i).Name
+ f := v.Field(i)
+ if name[0:1] == strings.ToLower(name[0:1]) {
+ continue // ignore unexported fields
+ }
+ if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() {
+ continue // ignore unset fields
+ }
+ names = append(names, name)
+ }
+
+ for i, n := range names {
+ val := v.FieldByName(n)
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(n + ": ")
+ stringValue(val, indent+2, buf)
+
+ if i < len(names)-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ case reflect.Slice:
+ nl, id, id2 := "", "", ""
+ if v.Len() > 3 {
+ nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
+ }
+ buf.WriteString("[" + nl)
+ for i := 0; i < v.Len(); i++ {
+ buf.WriteString(id2)
+ stringValue(v.Index(i), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString("," + nl)
+ }
+ }
+
+ buf.WriteString(nl + id + "]")
+ case reflect.Map:
+ buf.WriteString("{\n")
+
+ for i, k := range v.MapKeys() {
+ buf.WriteString(strings.Repeat(" ", indent+2))
+ buf.WriteString(k.String() + ": ")
+ stringValue(v.MapIndex(k), indent+2, buf)
+
+ if i < v.Len()-1 {
+ buf.WriteString(",\n")
+ }
+ }
+
+ buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
+ default:
+ format := "%v"
+ switch v.Interface().(type) {
+ case string:
+ format = "%q"
+ }
+ fmt.Fprintf(buf, format, v.Interface())
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
new file mode 100644
index 0000000..c8d0564
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -0,0 +1,120 @@
+package client
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/http/httputil"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides configuration to a service client instance.
+type Config struct {
+ Config *aws.Config
+ Handlers request.Handlers
+ Endpoint, SigningRegion string
+}
+
+// ConfigProvider provides a generic way for a service client to receive
+// the ClientConfig without circular dependencies.
+type ConfigProvider interface {
+ ClientConfig(serviceName string, cfgs ...*aws.Config) Config
+}
+
+// A Client implements the base client request and response handling
+// used by all service clients.
+type Client struct {
+ request.Retryer
+ metadata.ClientInfo
+
+ Config aws.Config
+ Handlers request.Handlers
+}
+
+// New will return a pointer to a new initialized service client.
+func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client {
+ svc := &Client{
+ Config: cfg,
+ ClientInfo: info,
+ Handlers: handlers,
+ }
+
+ switch retryer, ok := cfg.Retryer.(request.Retryer); {
+ case ok:
+ svc.Retryer = retryer
+ case cfg.Retryer != nil && cfg.Logger != nil:
+ s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer)
+ cfg.Logger.Log(s)
+ fallthrough
+ default:
+ maxRetries := aws.IntValue(cfg.MaxRetries)
+ if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries {
+ maxRetries = 3
+ }
+ svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries}
+ }
+
+ svc.AddDebugHandlers()
+
+ for _, option := range options {
+ option(svc)
+ }
+
+ return svc
+}
+
+// NewRequest returns a new Request pointer for the service API
+// operation and parameters.
+func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request {
+ return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data)
+}
+
+// AddDebugHandlers injects debug logging handlers into the service to log request
+// debug information.
+func (c *Client) AddDebugHandlers() {
+ if !c.Config.LogLevel.AtLeast(aws.LogDebug) {
+ return
+ }
+
+ c.Handlers.Send.PushFront(logRequest)
+ c.Handlers.Send.PushBack(logResponse)
+}
+
+const logReqMsg = `DEBUG: Request %s/%s Details:
+---[ REQUEST POST-SIGN ]-----------------------------
+%s
+-----------------------------------------------------`
+
+func logRequest(r *request.Request) {
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+
+ if logBody {
+ // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
+ // Body as a NoOpCloser, and it will not be reset after being read by the
+ // HTTP client reader.
+ r.Body.Seek(r.BodyStart, 0)
+ r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+func logResponse(r *request.Request) {
+ var msg = "no response data"
+ if r.HTTPResponse != nil {
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)
+ msg = string(dumpedBody)
+ } else if r.Error != nil {
+ msg = r.Error.Error()
+ }
+ r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
+}
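
These handlers only run when the config's log level is at least LogDebug. A sketch of wiring that up when constructing a client, assuming the session and s3 packages from this same vendor drop (the region value is illustrative):

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// LogDebugWithHTTPBody makes logRequest/logResponse dump the signed
	// request and the raw response through the configured Logger.
	sess := session.New(&aws.Config{
		Region:   aws.String("us-east-1"),
		LogLevel: aws.LogLevel(aws.LogDebugWithHTTPBody),
	})

	svc := s3.New(sess)
	_, _ = svc.ListBuckets(&s3.ListBucketsInput{})
}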
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
new file mode 100644
index 0000000..43a3676
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -0,0 +1,90 @@
+package client
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// DefaultRetryer implements basic retry logic using exponential backoff for
+// most services. If you want to implement custom retry logic, implement the
+// request.Retryer interface or create a structure type that composes this
+// struct and override the specific methods. For example, to override only
+// the MaxRetries method:
+//
+// type retryer struct {
+// service.DefaultRetryer
+// }
+//
+// // This implementation always has 100 max retries
+// func (d retryer) MaxRetries() uint { return 100 }
+type DefaultRetryer struct {
+ NumMaxRetries int
+}
+
+// MaxRetries returns the maximum number of retries the service will make for
+// an individual API request.
+func (d DefaultRetryer) MaxRetries() int {
+ return d.NumMaxRetries
+}
+
+var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())})
+
+// RetryRules returns the delay duration before retrying this request again
+func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
+ // Set the upper limit of delay in retrying at ~five minutes
+ minTime := 30
+ throttle := d.shouldThrottle(r)
+ if throttle {
+ minTime = 500
+ }
+
+ retryCount := r.RetryCount
+ if retryCount > 13 {
+ retryCount = 13
+ } else if throttle && retryCount > 8 {
+ retryCount = 8
+ }
+
+ delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime)
+ return time.Duration(delay) * time.Millisecond
+}
+
+// ShouldRetry returns true if the request should be retried.
+func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+ if r.HTTPResponse.StatusCode >= 500 {
+ return true
+ }
+ return r.IsErrorRetryable() || d.shouldThrottle(r)
+}
+
+// shouldThrottle returns true if the request should be throttled.
+func (d DefaultRetryer) shouldThrottle(r *request.Request) bool {
+ if r.HTTPResponse.StatusCode == 502 ||
+ r.HTTPResponse.StatusCode == 503 ||
+ r.HTTPResponse.StatusCode == 504 {
+ return true
+ }
+ return r.IsErrorThrottle()
+}
+
+// lockedSource is a thread-safe implementation of rand.Source
+type lockedSource struct {
+ lk sync.Mutex
+ src rand.Source
+}
+
+func (r *lockedSource) Int63() (n int64) {
+ r.lk.Lock()
+ n = r.src.Int63()
+ r.lk.Unlock()
+ return
+}
+
+func (r *lockedSource) Seed(seed int64) {
+ r.lk.Lock()
+ r.src.Seed(seed)
+ r.lk.Unlock()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
new file mode 100644
index 0000000..4778056
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
@@ -0,0 +1,12 @@
+package metadata
+
+// ClientInfo wraps immutable data from the client.Client structure.
+type ClientInfo struct {
+ ServiceName string
+ APIVersion string
+ Endpoint string
+ SigningName string
+ SigningRegion string
+ JSONVersion string
+ TargetPrefix string
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
new file mode 100644
index 0000000..fa11f99
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -0,0 +1,358 @@
+package aws
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/credentials"
+)
+
+// UseServiceDefaultRetries instructs the config to use the service's own default
+// number of retries. This will be the default action if Config.MaxRetries
+// is nil also.
+const UseServiceDefaultRetries = -1
+
+// RequestRetryer is an alias for a type that implements the request.Retryer interface.
+type RequestRetryer interface{}
+
+// A Config provides service configuration for service clients. By default,
+// all clients will use the {defaults.DefaultConfig} structure.
+type Config struct {
+ // Enables verbose error printing of all credential chain errors.
+ // Should be used when you want to see all errors while attempting to
+ // retrieve credentials.
+ CredentialsChainVerboseErrors *bool
+
+ // The credentials object to use when signing requests. Defaults to
+ // a chain of credential providers to search for credentials in environment
+ // variables, shared credential file, and EC2 Instance Roles.
+ Credentials *credentials.Credentials
+
+ // An optional endpoint URL (hostname only or fully qualified URI)
+ // that overrides the default generated endpoint for a client. Set this
+ // to `""` to use the default generated endpoint.
+ //
+ // @note You must still provide a `Region` value when specifying an
+ // endpoint for a client.
+ Endpoint *string
+
+ // The region to send requests to. This parameter is required and must
+ // be configured globally or on a per-client basis unless otherwise
+ // noted. A full list of regions is found in the "Regions and Endpoints"
+ // document.
+ //
+ // @see http://docs.aws.amazon.com/general/latest/gr/rande.html
+ // AWS Regions and Endpoints
+ Region *string
+
+ // Set this to `true` to disable SSL when sending requests. Defaults
+ // to `false`.
+ DisableSSL *bool
+
+ // The HTTP client to use when sending requests. Defaults to
+ // `http.DefaultClient`.
+ HTTPClient *http.Client
+
+ // An integer value representing the logging level. The default log level
+ // is zero (LogOff), which represents no logging. To enable logging set
+ // to a LogLevel Value.
+ LogLevel *LogLevelType
+
+ // The logger writer interface to write logging messages to. Defaults to
+ // standard out.
+ Logger Logger
+
+ // The maximum number of times that a request will be retried for failures.
+ // Defaults to -1, which defers the max retry setting to the service specific
+ // configuration.
+ MaxRetries *int
+
+ // Retryer guides how HTTP requests should be retried in case of recoverable failures.
+ //
+ // When nil or the value does not implement the request.Retryer interface,
+ // the request.DefaultRetryer will be used.
+ //
+ // When both Retryer and MaxRetries are non-nil, the former is used and
+ // the latter ignored.
+ //
+ // To set the Retryer field in a type-safe manner and with chaining, use
+ // the request.WithRetryer helper function:
+ //
+ // cfg := request.WithRetryer(aws.NewConfig(), myRetryer)
+ //
+ Retryer RequestRetryer
+
+ // Disables semantic parameter validation, which validates input for missing
+ // required fields and/or other semantic request input errors.
+ DisableParamValidation *bool
+
+ // Disables the computation of request and response checksums, e.g.,
+ // CRC32 checksums in Amazon DynamoDB.
+ DisableComputeChecksums *bool
+
+ // Set this to `true` to force the request to use path-style addressing,
+ // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will
+ // use virtual hosted bucket addressing when possible
+ // (`http://BUCKET.s3.amazonaws.com/KEY`).
+ //
+ // @note This configuration option is specific to the Amazon S3 service.
+ // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html
+ // Amazon S3: Virtual Hosting of Buckets
+ S3ForcePathStyle *bool
+
+ // Set this to `true` to disable the SDK adding the `Expect: 100-Continue`
+ // header to PUT requests over 2MB of content. 100-Continue instructs the
+ // HTTP client not to send the body until the service responds with a
+ // `continue` status. This is useful to prevent sending the request body
+ // until after the request is authenticated and validated.
+ //
+ // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
+ //
+ // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
+ // `ExpectContinueTimeout` for information on adjusting the continue wait timeout.
+ // https://golang.org/pkg/net/http/#Transport
+ //
+ // You should use this flag to disable 100-Continue if you experience issues
+ // with proxies or third-party S3 compatible services.
+ S3Disable100Continue *bool
+
+ // Set this to `true` to enable the S3 Accelerate feature. All operations
+ // compatible with S3 Accelerate will use the accelerate endpoint for requests.
+ // Requests that are not compatible will fall back to normal S3 requests.
+ //
+ // The bucket must have Accelerate enabled for it to be used with an S3 client
+ // that has Accelerate enabled. If the bucket is not enabled for Accelerate an
+ // error will be returned. The bucket name must also be DNS compatible to work
+ // with Accelerate.
+ S3UseAccelerate *bool
+
+ // Set this to `true` to prevent the EC2Metadata client from overriding the
+ // default http.Client's Timeout. This is helpful if you do not want the EC2Metadata
+ // client to create a new http.Client. This option is only meaningful if you're not
+ // already using a custom HTTP client with the SDK. Enabled by default.
+ //
+ // Must be set and provided to session.New() in order to prevent the EC2Metadata
+ // client from overriding the timeout for the default credentials chain.
+ //
+ // Example:
+ //    sess := session.New(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))
+ //    svc := s3.New(sess)
+ //
+ EC2MetadataDisableTimeoutOverride *bool
+
+ // SleepDelay is an override for the function used to sleep between request
+ // retries. Defaults to time.Sleep.
+ SleepDelay func(time.Duration)
+}
+
+// NewConfig returns a new Config pointer that can be chained with builder methods to
+// set multiple configuration values inline without using pointers.
+//
+// sess := session.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
+//
+func NewConfig() *Config {
+ return &Config{}
+}
+
+// WithCredentialsChainVerboseErrors sets a config CredentialsChainVerboseErrors
+// value returning a Config pointer for chaining.
+func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
+ c.CredentialsChainVerboseErrors = &verboseErrs
+ return c
+}
+
+// WithCredentials sets a config Credentials value returning a Config pointer
+// for chaining.
+func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
+ c.Credentials = creds
+ return c
+}
+
+// WithEndpoint sets a config Endpoint value returning a Config pointer for
+// chaining.
+func (c *Config) WithEndpoint(endpoint string) *Config {
+ c.Endpoint = &endpoint
+ return c
+}
+
+// WithRegion sets a config Region value returning a Config pointer for
+// chaining.
+func (c *Config) WithRegion(region string) *Config {
+ c.Region = &region
+ return c
+}
+
+// WithDisableSSL sets a config DisableSSL value returning a Config pointer
+// for chaining.
+func (c *Config) WithDisableSSL(disable bool) *Config {
+ c.DisableSSL = &disable
+ return c
+}
+
+// WithHTTPClient sets a config HTTPClient value returning a Config pointer
+// for chaining.
+func (c *Config) WithHTTPClient(client *http.Client) *Config {
+ c.HTTPClient = client
+ return c
+}
+
+// WithMaxRetries sets a config MaxRetries value returning a Config pointer
+// for chaining.
+func (c *Config) WithMaxRetries(max int) *Config {
+ c.MaxRetries = &max
+ return c
+}
+
+// WithDisableParamValidation sets a config DisableParamValidation value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableParamValidation(disable bool) *Config {
+ c.DisableParamValidation = &disable
+ return c
+}
+
+// WithDisableComputeChecksums sets a config DisableComputeChecksums value
+// returning a Config pointer for chaining.
+func (c *Config) WithDisableComputeChecksums(disable bool) *Config {
+ c.DisableComputeChecksums = &disable
+ return c
+}
+
+// WithLogLevel sets a config LogLevel value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogLevel(level LogLevelType) *Config {
+ c.LogLevel = &level
+ return c
+}
+
+// WithLogger sets a config Logger value returning a Config pointer for
+// chaining.
+func (c *Config) WithLogger(logger Logger) *Config {
+ c.Logger = logger
+ return c
+}
+
+// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3ForcePathStyle(force bool) *Config {
+ c.S3ForcePathStyle = &force
+ return c
+}
+
+// WithS3Disable100Continue sets a config S3Disable100Continue value returning
+// a Config pointer for chaining.
+func (c *Config) WithS3Disable100Continue(disable bool) *Config {
+ c.S3Disable100Continue = &disable
+ return c
+}
+
+// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config
+// pointer for chaining.
+func (c *Config) WithS3UseAccelerate(enable bool) *Config {
+ c.S3UseAccelerate = &enable
+ return c
+}
+
+// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value
+// returning a Config pointer for chaining.
+func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config {
+ c.EC2MetadataDisableTimeoutOverride = &enable
+ return c
+}
+
+// WithSleepDelay overrides the function used to sleep while waiting for the
+// next retry. Defaults to time.Sleep.
+func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config {
+ c.SleepDelay = fn
+ return c
+}
+
+// MergeIn merges the passed in configs into the existing config object.
+func (c *Config) MergeIn(cfgs ...*Config) {
+ for _, other := range cfgs {
+ mergeInConfig(c, other)
+ }
+}
+
+func mergeInConfig(dst *Config, other *Config) {
+ if other == nil {
+ return
+ }
+
+ if other.CredentialsChainVerboseErrors != nil {
+ dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors
+ }
+
+ if other.Credentials != nil {
+ dst.Credentials = other.Credentials
+ }
+
+ if other.Endpoint != nil {
+ dst.Endpoint = other.Endpoint
+ }
+
+ if other.Region != nil {
+ dst.Region = other.Region
+ }
+
+ if other.DisableSSL != nil {
+ dst.DisableSSL = other.DisableSSL
+ }
+
+ if other.HTTPClient != nil {
+ dst.HTTPClient = other.HTTPClient
+ }
+
+ if other.LogLevel != nil {
+ dst.LogLevel = other.LogLevel
+ }
+
+ if other.Logger != nil {
+ dst.Logger = other.Logger
+ }
+
+ if other.MaxRetries != nil {
+ dst.MaxRetries = other.MaxRetries
+ }
+
+ if other.Retryer != nil {
+ dst.Retryer = other.Retryer
+ }
+
+ if other.DisableParamValidation != nil {
+ dst.DisableParamValidation = other.DisableParamValidation
+ }
+
+ if other.DisableComputeChecksums != nil {
+ dst.DisableComputeChecksums = other.DisableComputeChecksums
+ }
+
+ if other.S3ForcePathStyle != nil {
+ dst.S3ForcePathStyle = other.S3ForcePathStyle
+ }
+
+ if other.S3Disable100Continue != nil {
+ dst.S3Disable100Continue = other.S3Disable100Continue
+ }
+
+ if other.S3UseAccelerate != nil {
+ dst.S3UseAccelerate = other.S3UseAccelerate
+ }
+
+ if other.EC2MetadataDisableTimeoutOverride != nil {
+ dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride
+ }
+
+ if other.SleepDelay != nil {
+ dst.SleepDelay = other.SleepDelay
+ }
+}
+
+// Copy will return a shallow copy of the Config object. If any additional
+// configurations are provided they will be merged into the new config returned.
+func (c *Config) Copy(cfgs ...*Config) *Config {
+ dst := &Config{}
+ dst.MergeIn(c)
+
+ for _, cfg := range cfgs {
+ dst.MergeIn(cfg)
+ }
+
+ return dst
+}
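+
+// Illustrative only: MergeIn and Copy overwrite a field on the destination
+// only when the overriding config has that field set, so:
+//
+//	base := aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(3)
+//	dup := base.Copy(aws.NewConfig().WithRegion("eu-west-1"))
+//	// dup has Region "eu-west-1" and still has MaxRetries 3.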
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
new file mode 100644
index 0000000..cff5c5c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -0,0 +1,369 @@
+package aws
+
+import "time"
+
+// String returns a pointer to the string value passed in.
+func String(v string) *string {
+ return &v
+}
+
+// StringValue returns the value of the string pointer passed in or
+// "" if the pointer is nil.
+func StringValue(v *string) string {
+ if v != nil {
+ return *v
+ }
+ return ""
+}
+
+// StringSlice converts a slice of string values into a slice of
+// string pointers
+func StringSlice(src []string) []*string {
+ dst := make([]*string, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// StringValueSlice converts a slice of string pointers into a slice of
+// string values
+func StringValueSlice(src []*string) []string {
+ dst := make([]string, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// StringMap converts a string map of string values into a string
+// map of string pointers
+func StringMap(src map[string]string) map[string]*string {
+ dst := make(map[string]*string)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// StringValueMap converts a string map of string pointers into a string
+// map of string values
+func StringValueMap(src map[string]*string) map[string]string {
+ dst := make(map[string]string)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
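+
+// A minimal illustrative round trip between values and pointers (nil
+// pointers collapse to the zero value on the way back):
+//
+//	ptrs := StringSlice([]string{"a", "b"}) // []*string
+//	vals := StringValueSlice(ptrs)          // []string{"a", "b"}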
+
+// Bool returns a pointer to the bool value passed in.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// BoolValue returns the value of the bool pointer passed in or
+// false if the pointer is nil.
+func BoolValue(v *bool) bool {
+ if v != nil {
+ return *v
+ }
+ return false
+}
+
+// BoolSlice converts a slice of bool values into a slice of
+// bool pointers
+func BoolSlice(src []bool) []*bool {
+ dst := make([]*bool, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// BoolValueSlice converts a slice of bool pointers into a slice of
+// bool values
+func BoolValueSlice(src []*bool) []bool {
+ dst := make([]bool, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// BoolMap converts a string map of bool values into a string
+// map of bool pointers
+func BoolMap(src map[string]bool) map[string]*bool {
+ dst := make(map[string]*bool)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// BoolValueMap converts a string map of bool pointers into a string
+// map of bool values
+func BoolValueMap(src map[string]*bool) map[string]bool {
+ dst := make(map[string]bool)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int returns a pointer to the int value passed in.
+func Int(v int) *int {
+ return &v
+}
+
+// IntValue returns the value of the int pointer passed in or
+// 0 if the pointer is nil.
+func IntValue(v *int) int {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// IntSlice converts a slice of int values into a slice of
+// int pointers
+func IntSlice(src []int) []*int {
+ dst := make([]*int, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// IntValueSlice converts a slice of int pointers into a slice of
+// int values
+func IntValueSlice(src []*int) []int {
+ dst := make([]int, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// IntMap converts a string map of int values into a string
+// map of int pointers
+func IntMap(src map[string]int) map[string]*int {
+ dst := make(map[string]*int)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// IntValueMap converts a string map of int pointers into a string
+// map of int values
+func IntValueMap(src map[string]*int) map[string]int {
+ dst := make(map[string]int)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Int64 returns a pointer to the int64 value passed in.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Int64Value returns the value of the int64 pointer passed in or
+// 0 if the pointer is nil.
+func Int64Value(v *int64) int64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Int64Slice converts a slice of int64 values into a slice of
+// int64 pointers
+func Int64Slice(src []int64) []*int64 {
+ dst := make([]*int64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Int64ValueSlice converts a slice of int64 pointers into a slice of
+// int64 values
+func Int64ValueSlice(src []*int64) []int64 {
+ dst := make([]int64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Int64Map converts a string map of int64 values into a string
+// map of int64 pointers
+func Int64Map(src map[string]int64) map[string]*int64 {
+ dst := make(map[string]*int64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Int64ValueMap converts a string map of int64 pointers into a string
+// map of int64 values
+func Int64ValueMap(src map[string]*int64) map[string]int64 {
+ dst := make(map[string]int64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Float64 returns a pointer to the float64 value passed in.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Float64Value returns the value of the float64 pointer passed in or
+// 0 if the pointer is nil.
+func Float64Value(v *float64) float64 {
+ if v != nil {
+ return *v
+ }
+ return 0
+}
+
+// Float64Slice converts a slice of float64 values into a slice of
+// float64 pointers
+func Float64Slice(src []float64) []*float64 {
+ dst := make([]*float64, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// Float64ValueSlice converts a slice of float64 pointers into a slice of
+// float64 values
+func Float64ValueSlice(src []*float64) []float64 {
+ dst := make([]float64, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// Float64Map converts a string map of float64 values into a string
+// map of float64 pointers
+func Float64Map(src map[string]float64) map[string]*float64 {
+ dst := make(map[string]*float64)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// Float64ValueMap converts a string map of float64 pointers into a string
+// map of float64 values
+func Float64ValueMap(src map[string]*float64) map[string]float64 {
+ dst := make(map[string]float64)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
+
+// Time returns a pointer to the time.Time value passed in.
+func Time(v time.Time) *time.Time {
+ return &v
+}
+
+// TimeValue returns the value of the time.Time pointer passed in or
+// time.Time{} if the pointer is nil.
+func TimeValue(v *time.Time) time.Time {
+ if v != nil {
+ return *v
+ }
+ return time.Time{}
+}
+
+// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
+// The result is undefined if the Unix time cannot be represented by an int64;
+// this includes calling TimeUnixMilli on a zero Time.
+//
+// This utility is useful for service APIs, such as CloudWatch Logs, which
+// require their Unix time values to be in milliseconds.
+//
+// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information.
+func TimeUnixMilli(t time.Time) int64 {
+ return t.UnixNano() / int64(time.Millisecond/time.Nanosecond)
+}
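+
+// For example (illustrative), 1.5 seconds past the epoch converts to 1500ms:
+//
+//	ms := TimeUnixMilli(time.Unix(1, 500000000)) // 1500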
+
+// TimeSlice converts a slice of time.Time values into a slice of
+// time.Time pointers
+func TimeSlice(src []time.Time) []*time.Time {
+ dst := make([]*time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ dst[i] = &(src[i])
+ }
+ return dst
+}
+
+// TimeValueSlice converts a slice of time.Time pointers into a slice of
+// time.Time values
+func TimeValueSlice(src []*time.Time) []time.Time {
+ dst := make([]time.Time, len(src))
+ for i := 0; i < len(src); i++ {
+ if src[i] != nil {
+ dst[i] = *(src[i])
+ }
+ }
+ return dst
+}
+
+// TimeMap converts a string map of time.Time values into a string
+// map of time.Time pointers
+func TimeMap(src map[string]time.Time) map[string]*time.Time {
+ dst := make(map[string]*time.Time)
+ for k, val := range src {
+ v := val
+ dst[k] = &v
+ }
+ return dst
+}
+
+// TimeValueMap converts a string map of time.Time pointers into a string
+// map of time.Time values
+func TimeValueMap(src map[string]*time.Time) map[string]time.Time {
+ dst := make(map[string]time.Time)
+ for k, val := range src {
+ if val != nil {
+ dst[k] = *val
+ }
+ }
+ return dst
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
new file mode 100644
index 0000000..8456e29
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -0,0 +1,152 @@
+package corehandlers
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "regexp"
+ "runtime"
+ "strconv"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// lener is an interface matching types which also have a Len method.
+type lener interface {
+ Len() int
+}
+
+// BuildContentLengthHandler builds the content length of a request based on the body,
+// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
+// to determine the request body length and no "Content-Length" was specified, it will panic.
+//
+// The Content-Length will only be added to the request if the length of the body
+// is greater than 0. If the body is empty or the current `Content-Length`
+// header is <= 0, the header will also be stripped.
+var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
+ var length int64
+
+ if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" {
+ length, _ = strconv.ParseInt(slength, 10, 64)
+ } else {
+ switch body := r.Body.(type) {
+ case nil:
+ length = 0
+ case lener:
+ length = int64(body.Len())
+ case io.Seeker:
+ r.BodyStart, _ = body.Seek(0, 1)
+ end, _ := body.Seek(0, 2)
+ body.Seek(r.BodyStart, 0) // make sure to seek back to original location
+ length = end - r.BodyStart
+ default:
+ panic("Cannot get length of body, must provide `ContentLength`")
+ }
+ }
+
+ if length > 0 {
+ r.HTTPRequest.ContentLength = length
+ r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length))
+ } else {
+ r.HTTPRequest.ContentLength = 0
+ r.HTTPRequest.Header.Del("Content-Length")
+ }
+}}
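+
+// As a sketch of the io.Seeker branch above (illustrative): for a body whose
+// current read offset is 0 and whose size is 10 bytes,
+//
+//	start, _ := body.Seek(0, 1) // 0: current offset
+//	end, _ := body.Seek(0, 2)   // 10: offset at end
+//	body.Seek(start, 0)         // restore original position
+//	// length = end - start = 10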
+
+// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent.
+var SDKVersionUserAgentHandler = request.NamedHandler{
+ Name: "core.SDKVersionUserAgentHandler",
+ Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion,
+ runtime.Version(), runtime.GOOS, runtime.GOARCH),
+}
+
+var reStatusCode = regexp.MustCompile(`^(\d{3})`)
+
+// SendHandler is a request handler to send service request using HTTP client.
+var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) {
+ var err error
+ r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
+ if err != nil {
+ // Prevent leaking if an HTTPResponse was returned. Clean up
+ // the body.
+ if r.HTTPResponse != nil {
+ r.HTTPResponse.Body.Close()
+ }
+ // Capture the case where url.Error is returned while processing the
+ // response, e.g. a 301 without a Location header comes back as a string
+ // error and r.HTTPResponse is nil. Other url redirect errors will
+ // come back in a similar manner.
+ if e, ok := err.(*url.Error); ok && e.Err != nil {
+ if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
+ code, _ := strconv.ParseInt(s[1], 10, 64)
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(code),
+ Status: http.StatusText(int(code)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ return
+ }
+ }
+ if r.HTTPResponse == nil {
+ // Add a dummy request response object to ensure the HTTPResponse
+ // value is consistent.
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(0),
+ Status: http.StatusText(int(0)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
+ }
+ }
+ // Catch all other request errors.
+ r.Error = awserr.New("RequestError", "send request failed", err)
+ r.Retryable = aws.Bool(true) // network errors are retryable
+ }
+}}
+
+// ValidateResponseHandler is a request handler to validate service response.
+var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
+ if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 {
+ // this may be replaced by an UnmarshalError handler
+ r.Error = awserr.New("UnknownError", "unknown error", nil)
+ }
+}}
+
+// AfterRetryHandler performs final checks to determine if the request should
+// be retried and how long to delay.
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable == nil {
+ r.Retryable = aws.Bool(r.ShouldRetry(r))
+ }
+
+ if r.WillRetry() {
+ r.RetryDelay = r.RetryRules(r)
+ r.Config.SleepDelay(r.RetryDelay)
+
+ // when the expired token exception occurs the credentials
+ // need to be expired locally so that the next request to
+ // get credentials will trigger a credentials refresh.
+ if r.IsErrorExpired() {
+ r.Config.Credentials.Expire()
+ }
+
+ r.RetryCount++
+ r.Error = nil
+ }
+}}
+
+// ValidateEndpointHandler is a request handler to validate a request had the
+// appropriate Region and Endpoint set. Will set r.Error if the endpoint or
+// region is not valid.
+var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) {
+ if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
+ r.Error = aws.ErrMissingRegion
+ } else if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
new file mode 100644
index 0000000..7d50b15
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
@@ -0,0 +1,17 @@
+package corehandlers
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+// ValidateParametersHandler is a request handler to validate the input parameters.
+// Validating parameters only has meaning if done prior to the request being sent.
+var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) {
+ if !r.ParamsFilled() {
+ return
+ }
+
+ if v, ok := r.Params.(request.Validator); ok {
+ if err := v.Validate(); err != nil {
+ r.Error = err
+ }
+ }
+}}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
new file mode 100644
index 0000000..857311f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -0,0 +1,100 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var (
+ // ErrNoValidProvidersFoundInChain is returned when there are no valid
+ // providers in the ChainProvider.
+ //
+ // This has been deprecated. For verbose error messaging set
+ // aws.Config.CredentialsChainVerboseErrors to true
+ //
+ // @readonly
+ ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
+ `no valid providers in chain. Deprecated.
+ For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
+ nil)
+)
+
+// A ChainProvider will search for a provider which returns credentials
+// and cache that provider until Retrieve is called again.
+//
+// The ChainProvider provides a way of chaining multiple providers together
+// which will pick the first available using priority order of the Providers
+// in the list.
+//
+// If none of the Providers retrieve valid credentials Value, ChainProvider's
+// Retrieve() will return the error ErrNoValidProvidersFoundInChain.
+//
+// If a Provider is found which returns valid credentials Value ChainProvider
+// will cache that Provider for all calls to IsExpired(), until Retrieve is
+// called again.
+//
+// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
+// In this example EnvProvider will first check if any credentials are available
+// via the environment variables. If there are none, ChainProvider will check
+// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
+// does not return any credentials ChainProvider will return the error
+// ErrNoValidProvidersFoundInChain
+//
+// creds := NewChainCredentials(
+// []Provider{
+// &EnvProvider{},
+// &EC2RoleProvider{
+// Client: ec2metadata.New(sess),
+// },
+// })
+//
+// // Usage of ChainCredentials with aws.Config
+// svc := ec2.New(&aws.Config{Credentials: creds})
+//
+type ChainProvider struct {
+ Providers []Provider
+ curr Provider
+ VerboseErrors bool
+}
+
+// NewChainCredentials returns a pointer to a new Credentials object
+// wrapping a chain of providers.
+func NewChainCredentials(providers []Provider) *Credentials {
+ return NewCredentials(&ChainProvider{
+ Providers: append([]Provider{}, providers...),
+ })
+}
+
+// Retrieve returns the credentials value, or an error if no provider
+// returned without error.
+//
+// If a provider is found it will be cached and any calls to IsExpired()
+// will return the expired state of the cached provider.
+func (c *ChainProvider) Retrieve() (Value, error) {
+ var errs []error
+ for _, p := range c.Providers {
+ creds, err := p.Retrieve()
+ if err == nil {
+ c.curr = p
+ return creds, nil
+ }
+ errs = append(errs, err)
+ }
+ c.curr = nil
+
+ var err error
+ err = ErrNoValidProvidersFoundInChain
+ if c.VerboseErrors {
+ err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs)
+ }
+ return Value{}, err
+}
+
+// IsExpired will return the expired state of the currently cached provider
+// if there is one. If there is no current provider, true will be returned.
+func (c *ChainProvider) IsExpired() bool {
+ if c.curr != nil {
+ return c.curr.IsExpired()
+ }
+
+ return true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
new file mode 100644
index 0000000..7b8ebf5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -0,0 +1,223 @@
+// Package credentials provides credential retrieval and management
+//
+// The Credentials type is the primary method of getting access to and managing
+// credential Values. Using dependency injection, retrieval of the credential
+// values is handled by an object which satisfies the Provider interface.
+//
+// By default the Credentials.Get() will cache the successful result of a
+// Provider's Retrieve() until Provider.IsExpired() returns true, at which
+// point Credentials will call the Provider's Retrieve() to get a new credential Value.
+//
+// The Provider is responsible for determining when credentials Value have expired.
+// It is also important to note that Credentials will always call Retrieve the
+// first time Credentials.Get() is called.
+//
+// Example of using the environment variable credentials.
+//
+// creds := NewEnvCredentials()
+//
+// // Retrieve the credentials value
+// credValue, err := creds.Get()
+// if err != nil {
+// // handle error
+// }
+//
+// Example of forcing credentials to expire and be refreshed on the next Get().
+// This may be helpful to proactively expire credentials and refresh them sooner
+// than they would naturally expire on their own.
+//
+// creds := NewCredentials(&EC2RoleProvider{})
+// creds.Expire()
+// credsValue, err := creds.Get()
+// // New credentials will be retrieved instead of from cache.
+//
+//
+// Custom Provider
+//
+// Each Provider built into this package also provides a helper method to generate
+// a Credentials pointer setup with the provider. To use a custom Provider just
+// create a type which satisfies the Provider interface and pass it to the
+// NewCredentials method.
+//
+// type MyProvider struct{}
+// func (m *MyProvider) Retrieve() (Value, error) {...}
+// func (m *MyProvider) IsExpired() bool {...}
+//
+// creds := NewCredentials(&MyProvider{})
+// credValue, err := creds.Get()
+//
+package credentials
+
+import (
+ "sync"
+ "time"
+)
+
+// AnonymousCredentials is an empty Credentials object that can be used as
+// dummy placeholder credentials for requests that do not need to be signed.
+//
+// This Credentials object can be used to configure a service to not sign requests
+// when making service API calls. For example, when accessing public
+// s3 buckets.
+//
+// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
+// // Access public S3 buckets.
+//
+// @readonly
+var AnonymousCredentials = NewStaticCredentials("", "", "")
+
+// A Value is the AWS credentials value for individual credential fields.
+type Value struct {
+ // AWS Access key ID
+ AccessKeyID string
+
+ // AWS Secret Access Key
+ SecretAccessKey string
+
+ // AWS Session Token
+ SessionToken string
+
+ // Provider used to get credentials
+ ProviderName string
+}
+
+// A Provider is the interface for any component which will provide credentials
+// Value. A provider is required to manage its own Expired state, and what
+// being expired means.
+//
+// The Provider should not need to implement its own mutexes, because
+// that will be managed by Credentials.
+type Provider interface {
+ // Retrieve returns the credentials Value if it was successfully retrieved.
+ // An error is returned if the value was not obtainable, or is empty.
+ Retrieve() (Value, error)
+
+ // IsExpired returns if the credentials are no longer valid, and need
+ // to be retrieved.
+ IsExpired() bool
+}
+
+// An Expiry provides shared expiration logic to be used by credentials
+// providers to implement expiry functionality.
+//
+// The best method to use this struct is as an anonymous field within the
+// provider's struct.
+//
+// Example:
+// type EC2RoleProvider struct {
+// Expiry
+// ...
+// }
+type Expiry struct {
+ // The date/time when to expire on
+ expiration time.Time
+
+ // If set will be used by IsExpired to determine the current time.
+ // Defaults to time.Now if CurrentTime is not set. Available for testing
+ // to be able to mock out the current time.
+ CurrentTime func() time.Time
+}
+
+// SetExpiration sets the expiration IsExpired will check when called.
+//
+// If window is greater than 0 the expiration time will be reduced by the
+// window value.
+//
+// Using a window is helpful to trigger credentials to expire sooner than
+// the expiration time given to ensure no requests are made with expired
+// tokens.
+func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
+ e.expiration = expiration
+ if window > 0 {
+ e.expiration = e.expiration.Add(-window)
+ }
+}
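+
+// For instance (illustrative), credentials valid until expiresAt with a 10
+// second window are treated as expired 10 seconds early:
+//
+//	e.SetExpiration(expiresAt, 10*time.Second)
+//	// IsExpired() returns true from expiresAt-10s onward.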
+
+// IsExpired returns if the credentials are expired.
+func (e *Expiry) IsExpired() bool {
+ if e.CurrentTime == nil {
+ e.CurrentTime = time.Now
+ }
+ return e.expiration.Before(e.CurrentTime())
+}
+
+// A Credentials provides concurrency-safe retrieval of AWS credentials Value.
+// Credentials will cache the credentials value until they expire. Once the value
+// expires the next Get will attempt to retrieve valid credentials.
+//
+// Credentials is safe to use across multiple goroutines and will manage
+// locking internally so the Providers do not need to implement their own
+// synchronization.
+//
+// The first Credentials.Get() will always call Provider.Retrieve() to get the
+// first instance of the credentials Value. All calls to Get() after that
+// will return the cached credentials Value until IsExpired() returns true.
+type Credentials struct {
+ creds Value
+ forceRefresh bool
+ m sync.Mutex
+
+ provider Provider
+}
+
+// NewCredentials returns a pointer to a new Credentials with the provider set.
+func NewCredentials(provider Provider) *Credentials {
+ return &Credentials{
+ provider: provider,
+ forceRefresh: true,
+ }
+}
+
+// Get returns the credentials value, or error if the credentials Value failed
+// to be retrieved.
+//
+// Will return the cached credentials Value if it has not expired. If the
+// credentials Value has expired the Provider's Retrieve() will be called
+// to refresh the credentials.
+//
+// If Credentials.Expire() was called the credentials Value will be force
+// expired, and the next call to Get() will cause them to be refreshed.
+func (c *Credentials) Get() (Value, error) {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ if c.isExpired() {
+ creds, err := c.provider.Retrieve()
+ if err != nil {
+ return Value{}, err
+ }
+ c.creds = creds
+ c.forceRefresh = false
+ }
+
+ return c.creds, nil
+}
+
+// Expire expires the credentials and forces them to be retrieved on the
+// next call to Get().
+//
+// This will override the Provider's expired state, and force Credentials
+// to call the Provider's Retrieve().
+func (c *Credentials) Expire() {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ c.forceRefresh = true
+}
+
+// IsExpired returns if the credentials are no longer valid, and need
+// to be retrieved.
+//
+// If the Credentials were forced to be expired with Expire() this will
+// reflect that override.
+func (c *Credentials) IsExpired() bool {
+ c.m.Lock()
+ defer c.m.Unlock()
+
+ return c.isExpired()
+}
+
+// isExpired helper method wrapping the definition of expired credentials.
+func (c *Credentials) isExpired() bool {
+ return c.forceRefresh || c.provider.IsExpired()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
new file mode 100644
index 0000000..aa9d689
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
@@ -0,0 +1,178 @@
+package ec2rolecreds
+
+import (
+ "bufio"
+ "encoding/json"
+ "fmt"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+)
+
+// ProviderName is the name of the EC2Role provider.
+const ProviderName = "EC2RoleProvider"
+
+// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps
+// track of whether those credentials are expired.
+//
+// Example of how to configure the EC2RoleProvider with a custom http Client,
+// Endpoint, or ExpiryWindow:
+//
+// p := &ec2rolecreds.EC2RoleProvider{
+// // Pass in a custom timeout to be used when requesting
+// // IAM EC2 Role credentials.
+// Client: ec2metadata.New(sess, aws.Config{
+// HTTPClient: &http.Client{Timeout: 10 * time.Second},
+// }),
+//
+// // Do not use early expiry of credentials. If a non-zero value is
+// // specified the credentials will be expired early
+// ExpiryWindow: 0,
+// }
+type EC2RoleProvider struct {
+ credentials.Expiry
+
+ // Required EC2Metadata client to use when connecting to EC2 metadata service.
+ Client *ec2metadata.EC2Metadata
+
+ // ExpiryWindow will allow the credentials to trigger refreshing prior to
+ // the credentials actually expiring. This is beneficial so race conditions
+ // with expiring credentials do not cause requests to fail unexpectedly
+ // due to ExpiredTokenException exceptions.
+ //
+ // So an ExpiryWindow of 10s would cause calls to IsExpired() to return true
+ // 10 seconds before the credentials are actually expired.
+ //
+ // If ExpiryWindow is 0 or less it will be ignored.
+ ExpiryWindow time.Duration
+}
+
+// NewCredentials returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes a ConfigProvider to create an EC2Metadata client.
+// The ConfigProvider is satisfied by the session.Session type.
+func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: ec2metadata.New(c),
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping
+// the EC2RoleProvider. Takes an EC2Metadata client to use when connecting to
+// the EC2 metadata service.
+func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials {
+ p := &EC2RoleProvider{
+ Client: client,
+ }
+
+ for _, option := range options {
+ option(p)
+ }
+
+ return credentials.NewCredentials(p)
+}
+
+// Retrieve retrieves credentials from the EC2 service.
+// An error will be returned if the request fails, or if the provider is
+// unable to extract the desired credentials.
+func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) {
+ credsList, err := requestCredList(m.Client)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ if len(credsList) == 0 {
+ return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil)
+ }
+ credsName := credsList[0]
+
+ roleCreds, err := requestCred(m.Client, credsName)
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+
+ m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow)
+
+ return credentials.Value{
+ AccessKeyID: roleCreds.AccessKeyID,
+ SecretAccessKey: roleCreds.SecretAccessKey,
+ SessionToken: roleCreds.Token,
+ ProviderName: ProviderName,
+ }, nil
+}
+
+// An ec2RoleCredRespBody provides the shape for unmarshalling credential
+// request responses.
+type ec2RoleCredRespBody struct {
+ // Success State
+ Expiration time.Time
+ AccessKeyID string
+ SecretAccessKey string
+ Token string
+
+ // Error state
+ Code string
+ Message string
+}
+
+const iamSecurityCredsPath = "/iam/security-credentials"
+
+// requestCredList requests a list of credentials from the EC2 service. If there
+// are no credentials, or there is an error making or receiving the request, an
+// error will be returned.
+func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) {
+ resp, err := client.GetMetadata(iamSecurityCredsPath)
+ if err != nil {
+ return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err)
+ }
+
+ credsList := []string{}
+ s := bufio.NewScanner(strings.NewReader(resp))
+ for s.Scan() {
+ credsList = append(credsList, s.Text())
+ }
+
+ if err := s.Err(); err != nil {
+ return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err)
+ }
+
+ return credsList, nil
+}
+
+// requestCred requests the credentials for a specific role from the EC2 service.
+//
+// If the credentials cannot be found, or there is an error reading the response,
+// an error will be returned.
+func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) {
+ resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName))
+ if err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New("EC2RoleRequestError",
+ fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ respCreds := ec2RoleCredRespBody{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil {
+ return ec2RoleCredRespBody{},
+ awserr.New("SerializationError",
+ fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName),
+ err)
+ }
+
+ if respCreds.Code != "Success" {
+ // If an error code was returned, something failed requesting the role.
+ return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil)
+ }
+
+ return respCreds, nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
new file mode 100644
index 0000000..96655bc
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -0,0 +1,77 @@
+package credentials
+
+import (
+ "os"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// EnvProviderName is the name of the Env provider.
+const EnvProviderName = "EnvProvider"
+
+var (
+ // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be
+ // found in the process's environment.
+ //
+ // @readonly
+ ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil)
+
+ // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key
+ // can't be found in the process's environment.
+ //
+ // @readonly
+ ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil)
+)
+
+// An EnvProvider retrieves credentials from the environment variables of the
+// running process. Environment credentials never expire.
+//
+// Environment variables used:
+//
+// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
+type EnvProvider struct {
+ retrieved bool
+}
+
+// NewEnvCredentials returns a pointer to a new Credentials object
+// wrapping the environment variable provider.
+func NewEnvCredentials() *Credentials {
+ return NewCredentials(&EnvProvider{})
+}
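+
+// Minimal illustrative use, assuming AWS_ACCESS_KEY_ID and
+// AWS_SECRET_ACCESS_KEY are set in the environment:
+//
+//	creds := NewEnvCredentials()
+//	v, err := creds.Get() // v.AccessKeyID, v.SecretAccessKey populated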
+
+// Retrieve retrieves the keys from the environment.
+func (e *EnvProvider) Retrieve() (Value, error) {
+ e.retrieved = false
+
+ id := os.Getenv("AWS_ACCESS_KEY_ID")
+ if id == "" {
+ id = os.Getenv("AWS_ACCESS_KEY")
+ }
+
+ secret := os.Getenv("AWS_SECRET_ACCESS_KEY")
+ if secret == "" {
+ secret = os.Getenv("AWS_SECRET_KEY")
+ }
+
+ if id == "" {
+ return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound
+ }
+
+ if secret == "" {
+ return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound
+ }
+
+ e.retrieved = true
+ return Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: os.Getenv("AWS_SESSION_TOKEN"),
+ ProviderName: EnvProviderName,
+ }, nil
+}
+
+// IsExpired returns true if the credentials have not yet been retrieved.
+func (e *EnvProvider) IsExpired() bool {
+ return !e.retrieved
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
new file mode 100644
index 0000000..7fc91d9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini
@@ -0,0 +1,12 @@
+[default]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+aws_session_token = token
+
+[no_token]
+aws_access_key_id = accessKey
+aws_secret_access_key = secret
+
+[with_colon]
+aws_access_key_id: accessKey
+aws_secret_access_key: secret
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
new file mode 100644
index 0000000..7fb7cbf
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -0,0 +1,151 @@
+package credentials
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/go-ini/ini"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// SharedCredsProviderName is the name of the SharedCreds provider.
+const SharedCredsProviderName = "SharedCredentialsProvider"
+
+var (
+ // ErrSharedCredentialsHomeNotFound is emitted when the user's home directory cannot be found.
+ //
+ // @readonly
+ ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
+)
+
+// A SharedCredentialsProvider retrieves credentials from the current user's home
+// directory, and keeps track of whether those credentials are expired.
+//
+// Profile ini file example: $HOME/.aws/credentials
+type SharedCredentialsProvider struct {
+ // Path to the shared credentials file.
+ //
+ // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the
+ // env value is empty will default to current user's home directory.
+ // Linux/OSX: "$HOME/.aws/credentials"
+ // Windows: "%USERPROFILE%\.aws\credentials"
+ Filename string
+
+ // AWS Profile to extract credentials from the shared credentials file. If empty
+ // will default to environment variable "AWS_PROFILE" or "default" if
+ // environment variable is also not set.
+ Profile string
+
+ // retrieved states if the credentials have been successfully retrieved.
+ retrieved bool
+}
+
+// NewSharedCredentials returns a pointer to a new Credentials object
+// wrapping the Profile file provider.
+func NewSharedCredentials(filename, profile string) *Credentials {
+ return NewCredentials(&SharedCredentialsProvider{
+ Filename: filename,
+ Profile: profile,
+ })
+}
+
+// Retrieve reads and extracts the shared credentials from the current
+// user's home directory.
+func (p *SharedCredentialsProvider) Retrieve() (Value, error) {
+ p.retrieved = false
+
+ filename, err := p.filename()
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ creds, err := loadProfile(filename, p.profile())
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, err
+ }
+
+ p.retrieved = true
+ return creds, nil
+}
+
+// IsExpired returns if the shared credentials have expired.
+func (p *SharedCredentialsProvider) IsExpired() bool {
+ return !p.retrieved
+}
+
+// loadProfile loads the named profile from the file pointed to by the shared
+// credentials filename. The credentials retrieved from the profile will be
+// returned, or an error if it fails to read from the file or the data is invalid.
+func loadProfile(filename, profile string) (Value, error) {
+ config, err := ini.Load(filename)
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err)
+ }
+ iniProfile, err := config.GetSection(profile)
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err)
+ }
+
+ id, err := iniProfile.GetKey("aws_access_key_id")
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename),
+ err)
+ }
+
+ secret, err := iniProfile.GetKey("aws_secret_access_key")
+ if err != nil {
+ return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret",
+ fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename),
+ nil)
+ }
+
+ // Default to empty string if not found
+ token := iniProfile.Key("aws_session_token")
+
+ return Value{
+ AccessKeyID: id.String(),
+ SecretAccessKey: secret.String(),
+ SessionToken: token.String(),
+ ProviderName: SharedCredsProviderName,
+ }, nil
+}
+
+// filename returns the filename to use to read AWS shared credentials.
+//
+// Will return an error if the user's home directory path cannot be found.
+func (p *SharedCredentialsProvider) filename() (string, error) {
+ if p.Filename == "" {
+ if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
+ return p.Filename, nil
+ }
+
+ homeDir := os.Getenv("HOME") // *nix
+ if homeDir == "" { // Windows
+ homeDir = os.Getenv("USERPROFILE")
+ }
+ if homeDir == "" {
+ return "", ErrSharedCredentialsHomeNotFound
+ }
+
+ p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+ }
+
+ return p.Filename, nil
+}
+
+// profile returns the AWS shared credentials profile. If empty will read
+// environment variable "AWS_PROFILE". If that is not set profile will
+// return "default".
+func (p *SharedCredentialsProvider) profile() string {
+ if p.Profile == "" {
+ p.Profile = os.Getenv("AWS_PROFILE")
+ }
+ if p.Profile == "" {
+ p.Profile = "default"
+ }
+
+ return p.Profile
+}
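+
+// Illustrative only, using a hypothetical file path and the "no_token"
+// profile shown in example.ini:
+//
+//	creds := NewSharedCredentials("/path/to/credentials", "no_token")
+//	v, err := creds.Get()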
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
new file mode 100644
index 0000000..6f07560
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
@@ -0,0 +1,48 @@
+package credentials
+
+import (
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// StaticProviderName is the name of the Static provider.
+const StaticProviderName = "StaticProvider"
+
+var (
+ // ErrStaticCredentialsEmpty is emitted when static credentials are empty.
+ //
+ // @readonly
+ ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil)
+)
+
+// A StaticProvider is a set of credentials which are set programmatically,
+// and will never expire.
+type StaticProvider struct {
+ Value
+}
+
+// NewStaticCredentials returns a pointer to a new Credentials object
+// wrapping a static credentials value provider.
+func NewStaticCredentials(id, secret, token string) *Credentials {
+ return NewCredentials(&StaticProvider{Value: Value{
+ AccessKeyID: id,
+ SecretAccessKey: secret,
+ SessionToken: token,
+ }})
+}
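+
+// Illustrative use with placeholder values; an empty session token is allowed:
+//
+//	creds := NewStaticCredentials("AKID", "SECRET", "")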
+
+// Retrieve returns the credentials or error if the credentials are invalid.
+func (s *StaticProvider) Retrieve() (Value, error) {
+ if s.AccessKeyID == "" || s.SecretAccessKey == "" {
+ return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty
+ }
+
+ s.Value.ProviderName = StaticProviderName
+ return s.Value, nil
+}
+
+// IsExpired returns if the credentials are expired.
+//
+// For StaticProvider, the credentials never expire.
+func (s *StaticProvider) IsExpired() bool {
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
new file mode 100644
index 0000000..12be1a5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -0,0 +1,98 @@
+// Package defaults is a collection of helpers to retrieve the SDK's default
+// configuration and handlers.
+//
+// Generally this package shouldn't be used directly; use session.Session
+// instead. This package is useful when you need to reset the defaults
+// of a session or service client to the SDK defaults before setting
+// additional parameters.
+package defaults
+
+import (
+ "net/http"
+ "os"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
+ "github.com/aws/aws-sdk-go/aws/ec2metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/endpoints"
+)
+
+// A Defaults provides a collection of default values for SDK clients.
+type Defaults struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// Get returns the SDK's default values with Config and handlers pre-configured.
+func Get() Defaults {
+ cfg := Config()
+ handlers := Handlers()
+ cfg.Credentials = CredChain(cfg, handlers)
+
+ return Defaults{
+ Config: cfg,
+ Handlers: handlers,
+ }
+}
+
+// Config returns the default configuration without credentials.
+// To retrieve a config with credentials also included use
+// `defaults.Get().Config` instead.
+//
+// Generally you shouldn't need to use this method directly, but it
+// is available if you need to reset the configuration of an
+// existing service client or session.
+func Config() *aws.Config {
+ return aws.NewConfig().
+ WithCredentials(credentials.AnonymousCredentials).
+ WithRegion(os.Getenv("AWS_REGION")).
+ WithHTTPClient(http.DefaultClient).
+ WithMaxRetries(aws.UseServiceDefaultRetries).
+ WithLogger(aws.NewDefaultLogger()).
+ WithLogLevel(aws.LogOff).
+ WithSleepDelay(time.Sleep)
+}
+
+// Handlers returns the default request handlers.
+//
+// Generally you shouldn't need to use this method directly, but it
+// is available if you need to reset the request handlers of an
+// existing service client or session.
+func Handlers() request.Handlers {
+ var handlers request.Handlers
+
+ handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler)
+ handlers.Validate.AfterEachFn = request.HandlerListStopOnError
+ handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
+ handlers.Build.AfterEachFn = request.HandlerListStopOnError
+ handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
+ handlers.Send.PushBackNamed(corehandlers.SendHandler)
+ handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
+ handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)
+
+ return handlers
+}
+
+// CredChain returns the default credential chain.
+//
+// Generally you shouldn't need to use this method directly, but it
+// is available if you need to reset the credentials of an
+// existing service client or session's Config.
+func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials {
+ endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true)
+
+ return credentials.NewCredentials(&credentials.ChainProvider{
+ VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors),
+ Providers: []credentials.Provider{
+ &credentials.EnvProvider{},
+ &credentials.SharedCredentialsProvider{Filename: "", Profile: ""},
+ &ec2rolecreds.EC2RoleProvider{
+ Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion),
+ ExpiryWindow: 5 * time.Minute,
+ },
+ }})
+}
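
A short sketch of how these helpers compose (illustrative; assumes AWS_REGION is set or a region is supplied explicitly):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	// Get wires Config, Handlers, and the default credential chain together.
	d := defaults.Get()
	fmt.Println(aws.StringValue(d.Config.Region))

	// The pieces can also be rebuilt individually when resetting a client.
	cfg := defaults.Config().WithRegion("us-west-2")
	cfg.Credentials = defaults.CredChain(cfg, defaults.Handlers())
}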
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 0000000..669c813
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,140 @@
+package ec2metadata
+
+import (
+ "encoding/json"
+ "fmt"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or
+// an error if the request failed.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetMetadata",
+ HTTPMethod: "GET",
+ HTTPPath: path.Join("/", "meta-data", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+
+ return output.Content, req.Send()
+}
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or an error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+ op := &request.Operation{
+ Name: "GetDynamicData",
+ HTTPMethod: "GET",
+ HTTPPath: path.Join("/", "dynamic", p),
+ }
+
+ output := &metadataOutput{}
+ req := c.NewRequest(op, nil, output)
+
+ return output.Content, req.Send()
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. An error is returned if the request fails or the response
+// cannot be parsed.
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) {
+ resp, err := c.GetDynamicData("instance-identity/document")
+ if err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 instance identity document", err)
+ }
+
+ doc := EC2InstanceIdentityDocument{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil {
+ return EC2InstanceIdentityDocument{},
+ awserr.New("SerializationError",
+ "failed to decode EC2 instance identity document", err)
+ }
+
+ return doc, nil
+}
+
+// IAMInfo retrieves IAM info from the metadata API
+func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) {
+ resp, err := c.GetMetadata("iam/info")
+ if err != nil {
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataRequestError",
+ "failed to get EC2 IAM info", err)
+ }
+
+ info := EC2IAMInfo{}
+ if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil {
+ return EC2IAMInfo{},
+ awserr.New("SerializationError",
+ "failed to decode EC2 IAM info", err)
+ }
+
+ if info.Code != "Success" {
+ errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code)
+ return EC2IAMInfo{},
+ awserr.New("EC2MetadataError", errMsg, nil)
+ }
+
+ return info, nil
+}
+
+// Region returns the region the instance is running in.
+func (c *EC2Metadata) Region() (string, error) {
+ resp, err := c.GetMetadata("placement/availability-zone")
+ if err != nil {
+ return "", err
+ }
+
+ // returns the region without the AZ suffix, e.g. us-west-2a becomes us-west-2
+ return resp[:len(resp)-1], nil
+}
+
+// Available returns whether the application has access to the EC2 Metadata service.
+// Can be used to determine if the application is running within an EC2 instance and
+// the metadata service is available.
+func (c *EC2Metadata) Available() bool {
+ if _, err := c.GetMetadata("instance-id"); err != nil {
+ return false
+ }
+
+ return true
+}
+
+// An EC2IAMInfo provides the shape for unmarshalling
+// an IAM info from the metadata API
+type EC2IAMInfo struct {
+ Code string
+ LastUpdated time.Time
+ InstanceProfileArn string
+ InstanceProfileID string
+}
+
+// An EC2InstanceIdentityDocument provides the shape for unmarshalling
+// an instance identity document
+type EC2InstanceIdentityDocument struct {
+ DevpayProductCodes []string `json:"devpayProductCodes"`
+ AvailabilityZone string `json:"availabilityZone"`
+ PrivateIP string `json:"privateIp"`
+ Version string `json:"version"`
+ Region string `json:"region"`
+ InstanceID string `json:"instanceId"`
+ BillingProducts []string `json:"billingProducts"`
+ InstanceType string `json:"instanceType"`
+ AccountID string `json:"accountId"`
+ PendingTime time.Time `json:"pendingTime"`
+ ImageID string `json:"imageId"`
+ KernelID string `json:"kernelId"`
+ RamdiskID string `json:"ramdiskId"`
+ Architecture string `json:"architecture"`
+}
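
A usage sketch for this API (only functional on an EC2 instance; the region lookup relies on the AZ-suffix stripping noted above):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	svc := ec2metadata.New(session.New())

	// Available probes the metadata service with a GetMetadata call.
	if !svc.Available() {
		log.Fatal("EC2 metadata service not reachable")
	}

	// Region is the availability zone with its trailing letter removed,
	// e.g. us-west-2a -> us-west-2.
	region, err := svc.Region()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(region)
}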
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
new file mode 100644
index 0000000..5b4379d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
@@ -0,0 +1,124 @@
+// Package ec2metadata provides the client for making API calls to the
+// EC2 Metadata service.
+package ec2metadata
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "net/http"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+
+// An EC2Metadata is an EC2 Metadata service client.
+type EC2Metadata struct {
+ *client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+// Example:
+// // Create a EC2Metadata client from just a session.
+// svc := ec2metadata.New(mySession)
+//
+// // Create a EC2Metadata client with additional configuration
+// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If an unmodified HTTP client from the stdlib default, or no client at all,
+// is provided, the EC2Metadata client's HTTP timeout will be shortened.
+// To disable this override set Config.EC2MetadataDisableTimeoutOverride to true.
+// The override is enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+ if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+ // If the http client is unmodified and this feature is not disabled
+ // set custom timeouts for EC2Metadata requests.
+ cfg.HTTPClient = &http.Client{
+ // use a shorter timeout than default because the metadata
+ // service is local if it is running, and to fail faster
+ // if not running on an ec2 instance.
+ Timeout: 5 * time.Second,
+ }
+ }
+
+ svc := &EC2Metadata{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ Endpoint: endpoint,
+ APIVersion: "latest",
+ },
+ handlers,
+ ),
+ }
+
+ svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+ svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+ svc.Handlers.Validate.Clear()
+ svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+ // Add additional options to the service config
+ for _, option := range opts {
+ option(svc.Client)
+ }
+
+ return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+ return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+ Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
+ return
+ }
+
+ if data, ok := r.Data.(*metadataOutput); ok {
+ data.Content = b.String()
+ }
+}
+
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ b := &bytes.Buffer{}
+ if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+ r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
+ return
+ }
+
+ // Response body format is not consistent between metadata endpoints.
+ // Grab the error message as a string and include that as the source error
+ r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+ if r.ClientInfo.Endpoint == "" {
+ r.Error = aws.ErrMissingEndpoint
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
new file mode 100644
index 0000000..5766361
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go
@@ -0,0 +1,17 @@
+package aws
+
+import "github.com/aws/aws-sdk-go/aws/awserr"
+
+var (
+ // ErrMissingRegion is an error that is returned if region configuration is
+ // not found.
+ //
+ // @readonly
+ ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil)
+
+ // ErrMissingEndpoint is an error that is returned if an endpoint cannot be
+ // resolved for a service.
+ //
+ // @readonly
+ ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
new file mode 100644
index 0000000..db87188
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -0,0 +1,112 @@
+package aws
+
+import (
+ "log"
+ "os"
+)
+
+// A LogLevelType defines the level logging should be performed at. Used to instruct
+// the SDK which statements should be logged.
+type LogLevelType uint
+
+// LogLevel returns a pointer to a LogLevelType. Should be used to work around
+// not being able to take the address of a non-composite literal.
+func LogLevel(l LogLevelType) *LogLevelType {
+ return &l
+}
+
+// Value returns the LogLevel value or the default value LogOff if the LogLevel
+// is nil. Safe to use on nil value LogLevelTypes.
+func (l *LogLevelType) Value() LogLevelType {
+ if l != nil {
+ return *l
+ }
+ return LogOff
+}
+
+// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
+// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
+// LogLevel is nil, will default to LogOff comparison.
+func (l *LogLevelType) Matches(v LogLevelType) bool {
+ c := l.Value()
+ return c&v == v
+}
+
+// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
+// to LogOff comparison.
+func (l *LogLevelType) AtLeast(v LogLevelType) bool {
+ c := l.Value()
+ return c >= v
+}
+
+const (
+ // LogOff states that no logging should be performed by the SDK. This is the
+ // default state of the SDK, and should be used to disable all logging.
+ LogOff LogLevelType = iota * 0x1000
+
+ // LogDebug states that debug output should be logged by the SDK. This should
+ // be used to inspect requests made and responses received.
+ LogDebug
+)
+
+// Debug Logging Sub Levels
+const (
+ // LogDebugWithSigning states that the SDK should log request signing and
+ // presigning events. This should be used to log the signing details of
+ // requests for debugging. Will also enable LogDebug.
+ LogDebugWithSigning LogLevelType = LogDebug | (1 << iota)
+
+ // LogDebugWithHTTPBody states the SDK should log HTTP request and response
+ // HTTP bodies in addition to the headers and path. This should be used to
+ // see the body content of requests and responses made while using the SDK.
+ // Will also enable LogDebug.
+ LogDebugWithHTTPBody
+
+ // LogDebugWithRequestRetries states the SDK should log when service requests will
+ // be retried. This should be used when you want to log retries of service
+ // requests. Will also enable LogDebug.
+ LogDebugWithRequestRetries
+
+ // LogDebugWithRequestErrors states the SDK should log when service requests fail
+ // to build, send, validate, or unmarshal.
+ LogDebugWithRequestErrors
+)
+
+// A Logger is a minimalistic interface for the SDK to log messages to. Should
+// be used to provide custom logging writers for the SDK to use.
+type Logger interface {
+ Log(...interface{})
+}
+
+// A LoggerFunc is a convenience type to convert a function taking a variadic
+// list of arguments and wrap it so the Logger interface can be used.
+//
+// Example:
+// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) {
+// fmt.Fprintln(os.Stdout, args...)
+// })})
+type LoggerFunc func(...interface{})
+
+// Log calls the wrapped function with the arguments provided
+func (f LoggerFunc) Log(args ...interface{}) {
+ f(args...)
+}
+
+// NewDefaultLogger returns a Logger which will write log messages to stdout, and
+// use the same formatting as the stdlib log.Logger.
+func NewDefaultLogger() Logger {
+ return &defaultLogger{
+ logger: log.New(os.Stdout, "", log.LstdFlags),
+ }
+}
+
+// A defaultLogger provides a minimalistic logger satisfying the Logger interface.
+type defaultLogger struct {
+ logger *log.Logger
+}
+
+// Log logs the parameters to the stdlib logger. See log.Println.
+func (l defaultLogger) Log(args ...interface{}) {
+ l.logger.Println(args...)
+}
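
A small sketch of the logger types above (the level checks show how sub levels imply LogDebug):

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// Any func with this shape satisfies Logger via LoggerFunc.
	logger := aws.LoggerFunc(func(args ...interface{}) {
		fmt.Fprintln(os.Stderr, args...)
	})
	logger.Log("hello", 42)

	// Sub levels OR in the LogDebug bit, so Matches(LogDebug) holds.
	lvl := aws.LogLevel(aws.LogDebugWithHTTPBody)
	fmt.Println(lvl.Matches(aws.LogDebug)) // true

	// Value is nil-safe and defaults to LogOff.
	var unset *aws.LogLevelType
	fmt.Println(unset.Value() == aws.LogOff) // true
}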
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
new file mode 100644
index 0000000..5279c19
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -0,0 +1,187 @@
+package request
+
+import (
+ "fmt"
+ "strings"
+)
+
+// A Handlers provides a collection of request handlers for various
+// stages of handling requests.
+type Handlers struct {
+ Validate HandlerList
+ Build HandlerList
+ Sign HandlerList
+ Send HandlerList
+ ValidateResponse HandlerList
+ Unmarshal HandlerList
+ UnmarshalMeta HandlerList
+ UnmarshalError HandlerList
+ Retry HandlerList
+ AfterRetry HandlerList
+}
+
+// Copy returns a copy of this Handlers' lists.
+func (h *Handlers) Copy() Handlers {
+ return Handlers{
+ Validate: h.Validate.copy(),
+ Build: h.Build.copy(),
+ Sign: h.Sign.copy(),
+ Send: h.Send.copy(),
+ ValidateResponse: h.ValidateResponse.copy(),
+ Unmarshal: h.Unmarshal.copy(),
+ UnmarshalError: h.UnmarshalError.copy(),
+ UnmarshalMeta: h.UnmarshalMeta.copy(),
+ Retry: h.Retry.copy(),
+ AfterRetry: h.AfterRetry.copy(),
+ }
+}
+
+// Clear removes callback functions for all handlers
+func (h *Handlers) Clear() {
+ h.Validate.Clear()
+ h.Build.Clear()
+ h.Send.Clear()
+ h.Sign.Clear()
+ h.Unmarshal.Clear()
+ h.UnmarshalMeta.Clear()
+ h.UnmarshalError.Clear()
+ h.ValidateResponse.Clear()
+ h.Retry.Clear()
+ h.AfterRetry.Clear()
+}
+
+// A HandlerListRunItem represents an entry in the HandlerList which
+// is being run.
+type HandlerListRunItem struct {
+ Index int
+ Handler NamedHandler
+ Request *Request
+}
+
+// A HandlerList manages zero or more handlers in a list.
+type HandlerList struct {
+ list []NamedHandler
+
+ // Called after each request handler in the list is called. If set
+ // and the func returns true the HandlerList will continue to iterate
+ // over the request handlers. If false is returned the HandlerList
+ // will stop iterating.
+ //
+ // Should be used if extra logic needs to be performed between each handler
+ // in the list. This can be used to terminate a list's iteration
+ // based on a condition, such as an error as in HandlerListStopOnError,
+ // or for logging as in HandlerListLogItem.
+ AfterEachFn func(item HandlerListRunItem) bool
+}
+
+// A NamedHandler is a struct that contains a name and function callback.
+type NamedHandler struct {
+ Name string
+ Fn func(*Request)
+}
+
+// copy creates a copy of the handler list.
+func (l *HandlerList) copy() HandlerList {
+ n := HandlerList{
+ AfterEachFn: l.AfterEachFn,
+ }
+ n.list = append([]NamedHandler{}, l.list...)
+ return n
+}
+
+// Clear clears the handler list.
+func (l *HandlerList) Clear() {
+ l.list = []NamedHandler{}
+}
+
+// Len returns the number of handlers in the list.
+func (l *HandlerList) Len() int {
+ return len(l.list)
+}
+
+// PushBack pushes handler f to the back of the handler list.
+func (l *HandlerList) PushBack(f func(*Request)) {
+ l.list = append(l.list, NamedHandler{"__anonymous", f})
+}
+
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+ l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...)
+}
+
+// PushBackNamed pushes the named handler n to the back of the handler list.
+func (l *HandlerList) PushBackNamed(n NamedHandler) {
+ l.list = append(l.list, n)
+}
+
+// PushFrontNamed pushes the named handler n to the front of the handler list.
+func (l *HandlerList) PushFrontNamed(n NamedHandler) {
+ l.list = append([]NamedHandler{n}, l.list...)
+}
+
+// Remove removes the NamedHandler n from the handler list.
+func (l *HandlerList) Remove(n NamedHandler) {
+ newlist := []NamedHandler{}
+ for _, m := range l.list {
+ if m.Name != n.Name {
+ newlist = append(newlist, m)
+ }
+ }
+ l.list = newlist
+}
+
+// Run executes all handlers in the list with a given request object.
+func (l *HandlerList) Run(r *Request) {
+ for i, h := range l.list {
+ h.Fn(r)
+ item := HandlerListRunItem{
+ Index: i, Handler: h, Request: r,
+ }
+ if l.AfterEachFn != nil && !l.AfterEachFn(item) {
+ return
+ }
+ }
+}
+
+// HandlerListLogItem logs the request handler and the state of the
+// request's Error value. Always returns true to continue iterating
+// request handlers in a HandlerList.
+func HandlerListLogItem(item HandlerListRunItem) bool {
+ if item.Request.Config.Logger == nil {
+ return true
+ }
+ item.Request.Config.Logger.Log("DEBUG: RequestHandler",
+ item.Index, item.Handler.Name, item.Request.Error)
+
+ return true
+}
+
+// HandlerListStopOnError returns false to stop the HandlerList iterating
+// over request handlers if Request.Error is not nil. Otherwise it returns
+// true to continue iterating.
+func HandlerListStopOnError(item HandlerListRunItem) bool {
+ return item.Request.Error == nil
+}
+
+// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
+// header. If the extra parameters are provided they will be added as metadata to the
+// name/version pair resulting in the following format.
+// "name/version (extra0; extra1; ...)"
+// The user agent part will be concatenated with this current request's user agent string.
+func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) {
+ ua := fmt.Sprintf("%s/%s", name, version)
+ if len(extra) > 0 {
+ ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; "))
+ }
+ return func(r *Request) {
+ AddToUserAgent(r, ua)
+ }
+}
+
+// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header.
+// The input string will be concatenated with the current request's user agent string.
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) {
+ return func(r *Request) {
+ AddToUserAgent(r, s)
+ }
+}
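
A sketch of composing a HandlerList directly (ordinarily these lists are only manipulated through a client's Handlers):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	var l request.HandlerList
	l.AfterEachFn = request.HandlerListStopOnError

	// Named handlers can later be removed by name.
	h := request.NamedHandler{Name: "example.Mark", Fn: func(r *request.Request) {
		fmt.Println("handler ran; request error:", r.Error)
	}}
	l.PushBackNamed(h)

	l.Run(&request.Request{}) // iteration stops early if a handler sets r.Error
	l.Remove(h)
	fmt.Println("handlers left:", l.Len()) // 0
}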
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
new file mode 100644
index 0000000..a4087f2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go
@@ -0,0 +1,33 @@
+// +build go1.5
+
+package request
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+)
+
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+ req := &http.Request{
+ URL: &url.URL{},
+ Header: http.Header{},
+ Close: r.Close,
+ Body: body,
+ Host: r.Host,
+ Method: r.Method,
+ Proto: r.Proto,
+ ContentLength: r.ContentLength,
+ // Cancel will be deprecated in 1.7 and will be replaced with Context
+ Cancel: r.Cancel,
+ }
+
+ *req.URL = *r.URL
+ for k, v := range r.Header {
+ for _, vv := range v {
+ req.Header.Add(k, vv)
+ }
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go
new file mode 100644
index 0000000..75da021
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go
@@ -0,0 +1,31 @@
+// +build !go1.5
+
+package request
+
+import (
+ "io"
+ "net/http"
+ "net/url"
+)
+
+func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request {
+ req := &http.Request{
+ URL: &url.URL{},
+ Header: http.Header{},
+ Close: r.Close,
+ Body: body,
+ Host: r.Host,
+ Method: r.Method,
+ Proto: r.Proto,
+ ContentLength: r.ContentLength,
+ }
+
+ *req.URL = *r.URL
+ for k, v := range r.Header {
+ for _, vv := range v {
+ req.Header.Add(k, vv)
+ }
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
new file mode 100644
index 0000000..da6396d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
@@ -0,0 +1,49 @@
+package request
+
+import (
+ "io"
+ "sync"
+)
+
+// offsetReader is a thread-safe io.ReadCloser used to prevent races
+// between reading the request body and retrying the request
+type offsetReader struct {
+ buf io.ReadSeeker
+ lock sync.RWMutex
+ closed bool
+}
+
+func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
+ reader := &offsetReader{}
+ buf.Seek(offset, 0)
+
+ reader.buf = buf
+ return reader
+}
+
+// Close is a thread-safe close. Uses the write lock.
+func (o *offsetReader) Close() error {
+ o.lock.Lock()
+ defer o.lock.Unlock()
+ o.closed = true
+ return nil
+}
+
+// Read is a thread-safe read using a read lock.
+func (o *offsetReader) Read(p []byte) (int, error) {
+ o.lock.RLock()
+ defer o.lock.RUnlock()
+
+ if o.closed {
+ return 0, io.EOF
+ }
+
+ return o.buf.Read(p)
+}
+
+// CloseAndCopy closes this offsetReader and returns a new offsetReader
+// backed by the same underlying buffer, seeked to the given offset.
+func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {
+ o.Close()
+ return newOffsetReader(o.buf, offset)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
new file mode 100644
index 0000000..711b90c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -0,0 +1,329 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+)
+
+// A Request is the service request to be made.
+type Request struct {
+ Config aws.Config
+ ClientInfo metadata.ClientInfo
+ Handlers Handlers
+
+ Retryer
+ Time time.Time
+ ExpireTime time.Duration
+ Operation *Operation
+ HTTPRequest *http.Request
+ HTTPResponse *http.Response
+ Body io.ReadSeeker
+ BodyStart int64 // offset from beginning of Body that the request body starts
+ Params interface{}
+ Error error
+ Data interface{}
+ RequestID string
+ RetryCount int
+ Retryable *bool
+ RetryDelay time.Duration
+ NotHoist bool
+ SignedHeaderVals http.Header
+
+ built bool
+}
+
+// An Operation is the service API operation to be made.
+type Operation struct {
+ Name string
+ HTTPMethod string
+ HTTPPath string
+ *Paginator
+}
+
+// Paginator keeps track of pagination configuration for an API operation.
+type Paginator struct {
+ InputTokens []string
+ OutputTokens []string
+ LimitToken string
+ TruncationToken string
+}
+
+// New returns a new Request pointer for the service API
+// operation and parameters.
+//
+// Params is any value of input parameters to be the request payload.
+// Data is a pointer to an object which the request's response
+// payload will be deserialized to.
+func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
+ retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request {
+
+ method := operation.HTTPMethod
+ if method == "" {
+ method = "POST"
+ }
+ p := operation.HTTPPath
+ if p == "" {
+ p = "/"
+ }
+
+ httpReq, _ := http.NewRequest(method, "", nil)
+
+ var err error
+ httpReq.URL, err = url.Parse(clientInfo.Endpoint + p)
+ if err != nil {
+ httpReq.URL = &url.URL{}
+ err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err)
+ }
+
+ r := &Request{
+ Config: cfg,
+ ClientInfo: clientInfo,
+ Handlers: handlers.Copy(),
+
+ Retryer: retryer,
+ Time: time.Now(),
+ ExpireTime: 0,
+ Operation: operation,
+ HTTPRequest: httpReq,
+ Body: nil,
+ Params: params,
+ Error: err,
+ Data: data,
+ }
+ r.SetBufferBody([]byte{})
+
+ return r
+}
+
+// WillRetry returns whether the request can be retried.
+func (r *Request) WillRetry() bool {
+ return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns whether the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or they are invalid.
+func (r *Request) ParamsFilled() bool {
+ return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for the response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+ return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+ r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+ r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+ r.HTTPRequest.Body = newOffsetReader(reader, 0)
+ r.Body = reader
+}
+
+// Presign returns the request's signed URL. An error will be returned
+// if the signing fails.
+func (r *Request) Presign(expireTime time.Duration) (string, error) {
+ r.ExpireTime = expireTime
+ r.NotHoist = false
+ r.Sign()
+ if r.Error != nil {
+ return "", r.Error
+ }
+ return r.HTTPRequest.URL.String(), nil
+}
+
+// PresignRequest behaves just like Presign, but signs the request's headers
+// rather than hoisting them, and also returns the signed header values to
+// the caller.
+func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
+ r.ExpireTime = expireTime
+ r.NotHoist = true
+ r.Sign()
+ if r.Error != nil {
+ return "", nil, r.Error
+ }
+ return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil
+}
+
+func debugLogReqError(r *Request, stage string, retrying bool, err error) {
+ if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) {
+ return
+ }
+
+ retryStr := "not retrying"
+ if retrying {
+ retryStr = "will retry"
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v",
+ stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err))
+}
+
+// Build will build the request's object so it can be signed and sent
+// to the service. Build will also validate all the request's parameters.
+// Any additional build Handlers set on this request will be run
+// in the order they were set.
+//
+// The request will only be built once. Multiple calls to Build will have
+// no effect.
+//
+// If any Validate or Build errors occur the build will stop and the error
+// which occurred will be returned.
+func (r *Request) Build() error {
+ if !r.built {
+ r.Handlers.Validate.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Request", false, r.Error)
+ return r.Error
+ }
+ r.Handlers.Build.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+ r.built = true
+ }
+
+ return r.Error
+}
+
+// Sign will sign the request returning error if errors are encountered.
+//
+// Send will build the request prior to signing. All Sign Handlers will
+// be executed in the order they were set.
+func (r *Request) Sign() error {
+ r.Build()
+ if r.Error != nil {
+ debugLogReqError(r, "Build Request", false, r.Error)
+ return r.Error
+ }
+
+ r.Handlers.Sign.Run(r)
+ return r.Error
+}
+
+// Send will send the request returning error if errors are encountered.
+//
+// Send will sign the request prior to sending. All Send Handlers will
+// be executed in the order they were set.
+//
+// Canceling a request is non-deterministic. If a request has been canceled,
+// then the transport will choose, at random, one of the state channels while
+// reading or while getting the connection.
+//
+// readLoop() and getConn(req *Request, cm connectMethod)
+// https://github.com/golang/go/blob/master/src/net/http/transport.go
+func (r *Request) Send() error {
+ for {
+ if aws.BoolValue(r.Retryable) {
+ if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
+ r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d",
+ r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
+ }
+
+ var body io.ReadCloser
+ if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok {
+ body = reader.CloseAndCopy(r.BodyStart)
+ } else {
+ if r.Config.Logger != nil {
+ r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions")
+ }
+ r.Body.Seek(r.BodyStart, 0)
+ body = ioutil.NopCloser(r.Body)
+ }
+
+ r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body)
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
+ // Closing response body. Since we are setting a new request to send off, this
+ // response will get squashed and leaked.
+ r.HTTPResponse.Body.Close()
+ }
+ }
+
+ r.Sign()
+ if r.Error != nil {
+ return r.Error
+ }
+
+ r.Retryable = nil
+
+ r.Handlers.Send.Run(r)
+ if r.Error != nil {
+ if strings.Contains(r.Error.Error(), "net/http: request canceled") {
+ return r.Error
+ }
+
+ err := r.Error
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Send Request", false, r.Error)
+ return r.Error
+ }
+ debugLogReqError(r, "Send Request", true, err)
+ continue
+ }
+
+ r.Handlers.UnmarshalMeta.Run(r)
+ r.Handlers.ValidateResponse.Run(r)
+ if r.Error != nil {
+ err := r.Error
+ r.Handlers.UnmarshalError.Run(r)
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Validate Response", false, r.Error)
+ return r.Error
+ }
+ debugLogReqError(r, "Validate Response", true, err)
+ continue
+ }
+
+ r.Handlers.Unmarshal.Run(r)
+ if r.Error != nil {
+ err := r.Error
+ r.Handlers.Retry.Run(r)
+ r.Handlers.AfterRetry.Run(r)
+ if r.Error != nil {
+ debugLogReqError(r, "Unmarshal Response", false, r.Error)
+ return r.Error
+ }
+ debugLogReqError(r, "Unmarshal Response", true, err)
+ continue
+ }
+
+ break
+ }
+
+ return nil
+}
+
+// AddToUserAgent adds the string to the end of the request's current user agent.
+func AddToUserAgent(r *Request, s string) {
+ curUA := r.HTTPRequest.Header.Get("User-Agent")
+ if len(curUA) > 0 {
+ s = curUA + " " + s
+ }
+ r.HTTPRequest.Header.Set("User-Agent", s)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
new file mode 100644
index 0000000..2939ec4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -0,0 +1,104 @@
+package request
+
+import (
+ "reflect"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+//type Paginater interface {
+// HasNextPage() bool
+// NextPage() *Request
+// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error
+//}
+
+// HasNextPage returns true if this request has more pages of data available.
+func (r *Request) HasNextPage() bool {
+ return len(r.nextPageTokens()) > 0
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of
+// data.
+func (r *Request) nextPageTokens() []interface{} {
+ if r.Operation.Paginator == nil {
+ return nil
+ }
+
+ if r.Operation.TruncationToken != "" {
+ tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
+ if len(tr) == 0 {
+ return nil
+ }
+
+ switch v := tr[0].(type) {
+ case *bool:
+ if !aws.BoolValue(v) {
+ return nil
+ }
+ case bool:
+ if !v {
+ return nil
+ }
+ }
+ }
+
+ tokens := []interface{}{}
+ tokenAdded := false
+ for _, outToken := range r.Operation.OutputTokens {
+ v, _ := awsutil.ValuesAtPath(r.Data, outToken)
+ if len(v) > 0 {
+ tokens = append(tokens, v[0])
+ tokenAdded = true
+ } else {
+ tokens = append(tokens, nil)
+ }
+ }
+ if !tokenAdded {
+ return nil
+ }
+
+ return tokens
+}
+
+// NextPage returns a new Request that can be executed to return the next
+// page of result data. Call .Send() on this request to execute it.
+func (r *Request) NextPage() *Request {
+ tokens := r.nextPageTokens()
+ if len(tokens) == 0 {
+ return nil
+ }
+
+ data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface()
+ nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data)
+ for i, intok := range nr.Operation.InputTokens {
+ awsutil.SetValueAtPath(nr.Params, intok, tokens[i])
+ }
+ return nr
+}
+
+// EachPage iterates over each page of a paginated request object. The fn
+// parameter should be a function with the following sample signature:
+//
+// func(page *T, lastPage bool) bool {
+// return true // return false to stop iterating
+// }
+//
+// Where "T" is the structure type matching the output structure of the given
+// operation. For example, a request object generated by
+// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput
+// as the structure "T". The lastPage value represents whether the page is
+// the last page of data or not. The return value of this function should
+// return true to keep iterating or false to stop.
+func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+ for page := r; page != nil; page = page.NextPage() {
+ if err := page.Send(); err != nil {
+ return err
+ }
+ if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage {
+ return page.Error
+ }
+ }
+
+ return nil
+}
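
A pagination sketch using S3's ListObjects as the paginated operation (the bucket name is hypothetical, and the SDK's s3 service package is assumed to be vendored):

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	svc := s3.New(session.New(&aws.Config{Region: aws.String("us-east-1")}))
	req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
		Bucket: aws.String("my-bucket"), // hypothetical bucket
	})

	// EachPage sends the request, then follows the operation's output
	// tokens (via NextPage) until there are none or fn returns false.
	err := req.EachPage(func(data interface{}, lastPage bool) bool {
		page := data.(*s3.ListObjectsOutput)
		fmt.Println(len(page.Contents), "keys; last page:", lastPage)
		return true
	})
	if err != nil {
		log.Fatal(err)
	}
}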
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
new file mode 100644
index 0000000..8cc8b01
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -0,0 +1,101 @@
+package request
+
+import (
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+// Retryer is an interface to control retry logic for a given service.
+// The default implementation used by most services is the client.DefaultRetryer
+// structure, which contains basic retry logic using exponential backoff.
+type Retryer interface {
+ RetryRules(*Request) time.Duration
+ ShouldRetry(*Request) bool
+ MaxRetries() int
+}
+
+// WithRetryer sets a config Retryer value to the given Config returning it
+// for chaining.
+func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
+ cfg.Retryer = retryer
+ return cfg
+}
+
+// retryableCodes is a collection of service response codes which are retry-able
+// without any further action.
+var retryableCodes = map[string]struct{}{
+ "RequestError": {},
+ "RequestTimeout": {},
+}
+
+var throttleCodes = map[string]struct{}{
+ "ProvisionedThroughputExceededException": {},
+ "Throttling": {},
+ "ThrottlingException": {},
+ "RequestLimitExceeded": {},
+ "RequestThrottled": {},
+ "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once
+ "TooManyRequestsException": {}, // Lambda functions
+}
+
+// credsExpiredCodes is a collection of error codes which signify the credentials
+// need to be refreshed. Expired tokens require refreshing of credentials, and
+// resigning before the request can be retried.
+var credsExpiredCodes = map[string]struct{}{
+ "ExpiredToken": {},
+ "ExpiredTokenException": {},
+ "RequestExpired": {}, // EC2 Only
+}
+
+func isCodeThrottle(code string) bool {
+ _, ok := throttleCodes[code]
+ return ok
+}
+
+func isCodeRetryable(code string) bool {
+ if _, ok := retryableCodes[code]; ok {
+ return true
+ }
+
+ return isCodeExpiredCreds(code)
+}
+
+func isCodeExpiredCreds(code string) bool {
+ _, ok := credsExpiredCodes[code]
+ return ok
+}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the request has no Error set.
+func (r *Request) IsErrorRetryable() bool {
+ if r.Error != nil {
+ if err, ok := r.Error.(awserr.Error); ok {
+ return isCodeRetryable(err.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if the request has no Error set.
+func (r *Request) IsErrorThrottle() bool {
+ if r.Error != nil {
+ if err, ok := r.Error.(awserr.Error); ok {
+ return isCodeThrottle(err.Code())
+ }
+ }
+ return false
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+func (r *Request) IsErrorExpired() bool {
+ if r.Error != nil {
+ if err, ok := r.Error.(awserr.Error); ok {
+ return isCodeExpiredCreds(err.Code())
+ }
+ }
+ return false
+}
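
A sketch of a custom Retryer built on the helpers above (fixed delay, three attempts; a real implementation would usually use exponential backoff as client.DefaultRetryer does):

package main

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
)

// fixedRetryer retries throttle and retryable errors up to three times
// with a constant delay between attempts.
type fixedRetryer struct{}

func (fixedRetryer) RetryRules(*request.Request) time.Duration { return 200 * time.Millisecond }
func (fixedRetryer) MaxRetries() int                           { return 3 }
func (fixedRetryer) ShouldRetry(r *request.Request) bool {
	return r.IsErrorRetryable() || r.IsErrorThrottle()
}

func main() {
	// WithRetryer attaches the retryer to a Config for chaining.
	cfg := request.WithRetryer(aws.NewConfig(), fixedRetryer{})
	_ = session.New(cfg) // clients created from this session inherit it
}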
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
new file mode 100644
index 0000000..2520286
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -0,0 +1,234 @@
+package request
+
+import (
+ "bytes"
+ "fmt"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+const (
+ // InvalidParameterErrCode is the error code for invalid parameters errors
+ InvalidParameterErrCode = "InvalidParameter"
+ // ParamRequiredErrCode is the error code for required parameter errors
+ ParamRequiredErrCode = "ParamRequiredError"
+ // ParamMinValueErrCode is the error code for fields with too low of a
+ // number value.
+ ParamMinValueErrCode = "ParamMinValueError"
+ // ParamMinLenErrCode is the error code for fields without enough elements.
+ ParamMinLenErrCode = "ParamMinLenError"
+)
+
+// Validator provides a way for types to perform validation logic on their
+// input values that external code can use to determine if a type's values
+// are valid.
+type Validator interface {
+ Validate() error
+}
+
+// An ErrInvalidParams provides wrapping of invalid parameter errors found when
+// validating API operation input parameters.
+type ErrInvalidParams struct {
+ // Context is the base context of the invalid parameter group.
+ Context string
+ errs []ErrInvalidParam
+}
+
+// Add adds a new invalid parameter error to the collection of invalid
+// parameters. The context of the invalid parameter will be updated to reflect
+// this collection.
+func (e *ErrInvalidParams) Add(err ErrInvalidParam) {
+ err.SetContext(e.Context)
+ e.errs = append(e.errs, err)
+}
+
+// AddNested adds the invalid parameter errors from another ErrInvalidParams
+// value into this collection. The nested errors will have their nested context
+// updated and base context to reflect the merging.
+//
+// Use for nested validations errors.
+func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
+ for _, err := range nested.errs {
+ err.SetContext(e.Context)
+ err.AddNestedContext(nestedCtx)
+ e.errs = append(e.errs, err)
+ }
+}
+
+// Len returns the number of invalid parameter errors
+func (e ErrInvalidParams) Len() int {
+ return len(e.errs)
+}
+
+// Code returns the code of the error
+func (e ErrInvalidParams) Code() string {
+ return InvalidParameterErrCode
+}
+
+// Message returns the message of the error
+func (e ErrInvalidParams) Message() string {
+ return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
+}
+
+// Error returns the string formatted form of the invalid parameters.
+func (e ErrInvalidParams) Error() string {
+ w := &bytes.Buffer{}
+ fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
+
+ for _, err := range e.errs {
+ fmt.Fprintf(w, "- %s\n", err.Message())
+ }
+
+ return w.String()
+}
+
+// OrigErr returns the invalid parameters as an awserr.BatchedErrors value
+func (e ErrInvalidParams) OrigErr() error {
+ return awserr.NewBatchError(
+ InvalidParameterErrCode, e.Message(), e.OrigErrs())
+}
+
+// OrigErrs returns a slice of the invalid parameters
+func (e ErrInvalidParams) OrigErrs() []error {
+ errs := make([]error, len(e.errs))
+ for i := 0; i < len(errs); i++ {
+ errs[i] = e.errs[i]
+ }
+
+ return errs
+}
+
+// An ErrInvalidParam represents an invalid parameter error type.
+type ErrInvalidParam interface {
+ awserr.Error
+
+ // Field name the error occurred on.
+ Field() string
+
+ // SetContext updates the context of the error.
+ SetContext(string)
+
+ // AddNestedContext updates the error's context to include a nested level.
+ AddNestedContext(string)
+}
+
+type errInvalidParam struct {
+ context string
+ nestedContext string
+ field string
+ code string
+ msg string
+}
+
+// Code returns the error code for the type of invalid parameter.
+func (e *errInvalidParam) Code() string {
+ return e.code
+}
+
+// Message returns the reason the parameter was invalid, and its context.
+func (e *errInvalidParam) Message() string {
+ return fmt.Sprintf("%s, %s.", e.msg, e.Field())
+}
+
+// Error returns the string version of the invalid parameter error.
+func (e *errInvalidParam) Error() string {
+ return fmt.Sprintf("%s: %s", e.code, e.Message())
+}
+
+// OrigErr returns nil. Implemented to satisfy the awserr.Error interface.
+func (e *errInvalidParam) OrigErr() error {
+ return nil
+}
+
+// Field returns the field and the context in which the error occurred.
+func (e *errInvalidParam) Field() string {
+ field := e.context
+ if len(field) > 0 {
+ field += "."
+ }
+ if len(e.nestedContext) > 0 {
+ field += fmt.Sprintf("%s.", e.nestedContext)
+ }
+ field += e.field
+
+ return field
+}
+
+// SetContext updates the base context of the error.
+func (e *errInvalidParam) SetContext(ctx string) {
+ e.context = ctx
+}
+
+// AddNestedContext prepends a context to the field's path.
+func (e *errInvalidParam) AddNestedContext(ctx string) {
+ if len(e.nestedContext) == 0 {
+ e.nestedContext = ctx
+ } else {
+ e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
+ }
+}
+
+// An ErrParamRequired represents a required parameter error.
+type ErrParamRequired struct {
+ errInvalidParam
+}
+
+// NewErrParamRequired creates a new required parameter error.
+func NewErrParamRequired(field string) *ErrParamRequired {
+ return &ErrParamRequired{
+ errInvalidParam{
+ code: ParamRequiredErrCode,
+ field: field,
+ msg: "missing required field",
+ },
+ }
+}
+
+// An ErrParamMinValue represents a minimum value parameter error.
+type ErrParamMinValue struct {
+ errInvalidParam
+ min float64
+}
+
+// NewErrParamMinValue creates a new minimum value parameter error.
+func NewErrParamMinValue(field string, min float64) *ErrParamMinValue {
+ return &ErrParamMinValue{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinValueErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field value of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinValue returns the field's required minimum value.
+//
+// float64 is returned for both int and float min values.
+func (e *ErrParamMinValue) MinValue() float64 {
+ return e.min
+}
+
+// An ErrParamMinLen represents a minimum length parameter error.
+type ErrParamMinLen struct {
+ errInvalidParam
+ min int
+}
+
+// NewErrParamMinLen creates a new minimum length parameter error.
+func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
+ return &ErrParamMinLen{
+ errInvalidParam: errInvalidParam{
+ code: ParamMinLenErrCode,
+ field: field,
+ msg: fmt.Sprintf("minimum field size of %v", min),
+ },
+ min: min,
+ }
+}
+
+// MinLen returns the field's required minimum length.
+func (e *ErrParamMinLen) MinLen() int {
+ return e.min
+}
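
A sketch of an input type using these validation helpers, in the style of the SDK's generated Validate methods (putInput is a hypothetical shape):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

// putInput is a hypothetical operation input.
type putInput struct {
	Name *string
	Size *int64
}

func (i *putInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "PutInput"}
	if i.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if i.Size != nil && *i.Size < 1 {
		invalidParams.Add(request.NewErrParamMinValue("Size", 1))
	}
	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

func main() {
	fmt.Println((&putInput{}).Validate())
	// InvalidParameter: 1 validation error(s) found.
	// - missing required field, PutInput.Name.
}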
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
new file mode 100644
index 0000000..6bc8f1b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -0,0 +1,120 @@
+// Package session provides a way to create service clients with shared configuration
+// and handlers.
+//
+// Generally this package should be used instead of the `defaults` package.
+//
+// A session should be used to share configurations and request handlers between multiple
+// service clients. When service clients need specific configuration aws.Config can be
+// used to provide additional configuration directly to the service client.
+package session
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/corehandlers"
+ "github.com/aws/aws-sdk-go/aws/defaults"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/endpoints"
+)
+
+// A Session provides a central location to create service clients from and
+// store configurations and request handlers for those services.
+//
+// It is safe to create service clients from a Session concurrently, but it is
+// not safe to mutate the Session concurrently.
+type Session struct {
+ Config *aws.Config
+ Handlers request.Handlers
+}
+
+// New creates a new Session, merging the provided Configs
+// on top of the SDK's default configurations. Once the session is created it
+// can be mutated to modify Configs or Handlers. The session is safe to be read
+// concurrently, but it should not be written to concurrently.
+//
+// Example:
+// // Create a session with the default config and request handlers.
+// sess := session.New()
+//
+// // Create a session with a custom region
+// sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
+//
+// // Create a session, and add additional handlers for all service
+// // clients created with the session to inherit. Adds logging handler.
+// sess := session.New()
+// sess.Handlers.Send.PushFront(func(r *request.Request) {
+// // Log every request made and its payload
+// logger.Printf("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params)
+// })
+//
+// // Create a S3 client instance from a session
+// sess := session.New()
+// svc := s3.New(sess)
+func New(cfgs ...*aws.Config) *Session {
+ cfg := defaults.Config()
+ handlers := defaults.Handlers()
+
+ // Apply the passed in configs so the configuration can be applied to the
+ // default credential chain
+ cfg.MergeIn(cfgs...)
+ cfg.Credentials = defaults.CredChain(cfg, handlers)
+
+ // Reapply any passed in configs to override credentials if set
+ cfg.MergeIn(cfgs...)
+
+ s := &Session{
+ Config: cfg,
+ Handlers: handlers,
+ }
+
+ initHandlers(s)
+
+ return s
+}
+
+func initHandlers(s *Session) {
+ // Add the Validate parameter handler if it is not disabled.
+ s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
+ if !aws.BoolValue(s.Config.DisableParamValidation) {
+ s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
+ }
+}
+
+// Copy creates and returns a copy of the current session, copying the config
+// and handlers. If any additional configs are provided they will be merged
+// on top of the session's copied config.
+//
+// Example:
+// // Create a copy of the current session, configured for the us-west-2 region.
+// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
+func (s *Session) Copy(cfgs ...*aws.Config) *Session {
+ newSession := &Session{
+ Config: s.Config.Copy(cfgs...),
+ Handlers: s.Handlers.Copy(),
+ }
+
+ initHandlers(newSession)
+
+ return newSession
+}
+
+// ClientConfig satisfies the client.ConfigProvider interface and is used to
+// configure the service client instances. Passing the Session to the service
+// client's constructor (New) will use this method to configure the client.
+//
+// Example:
+// sess := session.New()
+// s3.New(sess)
+func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
+ s = s.Copy(cfgs...)
+ endpoint, signingRegion := endpoints.NormalizeEndpoint(
+ aws.StringValue(s.Config.Endpoint), serviceName,
+ aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL))
+
+ return client.Config{
+ Config: s.Config,
+ Handlers: s.Handlers,
+ Endpoint: endpoint,
+ SigningRegion: signingRegion,
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go
new file mode 100644
index 0000000..fa014b4
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go
@@ -0,0 +1,106 @@
+package aws
+
+import (
+ "io"
+ "sync"
+)
+
+// ReadSeekCloser wraps an io.Reader, returning a ReaderSeekerCloser.
+func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
+ return ReaderSeekerCloser{r}
+}
+
+// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
+// io.Closer interfaces to the underlying object if they are available.
+type ReaderSeekerCloser struct {
+ r io.Reader
+}
+
+// Read reads from the reader up to the size of p. The number of bytes read,
+// and any error that occurred, will be returned.
+//
+// If the underlying reader is not an io.Reader, zero bytes read and a nil
+// error will be returned.
+//
+// Performs the same functionality as io.Reader's Read.
+func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
+ switch t := r.r.(type) {
+ case io.Reader:
+ return t.Read(p)
+ }
+ return 0, nil
+}
+
+// Seek sets the offset for the next Read to offset, interpreted according to
+// whence: 0 means relative to the origin of the file, 1 means relative to the
+// current offset, and 2 means relative to the end. Seek returns the new offset
+// and an error, if any.
+//
+// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
+func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
+ switch t := r.r.(type) {
+ case io.Seeker:
+ return t.Seek(offset, whence)
+ }
+ return int64(0), nil
+}
+
+// Close closes the ReaderSeekerCloser.
+//
+// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
+func (r ReaderSeekerCloser) Close() error {
+ switch t := r.r.(type) {
+ case io.Closer:
+ return t.Close()
+ }
+ return nil
+}
+
+// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt interface.
+// Can be used with the s3manager.Downloader to download content to a buffer
+// in memory. Safe to use concurrently.
+type WriteAtBuffer struct {
+ buf []byte
+ m sync.Mutex
+
+ // GrowthCoeff defines the growth rate of the internal buffer. By
+ // default, the growth rate is 1, where expanding the internal
+ // buffer will allocate only enough capacity to fit the new expected
+ // length.
+ GrowthCoeff float64
+}
+
+// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
+// provided by buf.
+func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
+ return &WriteAtBuffer{buf: buf}
+}
+
+// WriteAt writes a slice of bytes to the buffer starting at the position provided.
+// The number of bytes written, or an error, will be returned. Previously
+// written slices can be overwritten if the writes overlap.
+func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
+ pLen := len(p)
+ expLen := pos + int64(pLen)
+ b.m.Lock()
+ defer b.m.Unlock()
+ if int64(len(b.buf)) < expLen {
+ if int64(cap(b.buf)) < expLen {
+ if b.GrowthCoeff < 1 {
+ b.GrowthCoeff = 1
+ }
+ newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
+ copy(newBuf, b.buf)
+ b.buf = newBuf
+ }
+ b.buf = b.buf[:expLen]
+ }
+ copy(b.buf[pos:], p)
+ return pLen, nil
+}
+
+// Bytes returns a slice of bytes written to the buffer.
+func (b *WriteAtBuffer) Bytes() []byte {
+ b.m.Lock()
+ defer b.m.Unlock()
+ return b.buf[:len(b.buf):len(b.buf)]
+}
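
A sketch of WriteAtBuffer's out-of-order writes (the kind the s3manager.Downloader performs concurrently):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	b := aws.NewWriteAtBuffer(nil)
	b.GrowthCoeff = 2 // over-allocate to amortize growth across many writes

	// Writes may arrive out of order; the buffer grows to fit each one.
	b.WriteAt([]byte("world"), 6)
	b.WriteAt([]byte("hello,"), 0)

	fmt.Printf("%q\n", b.Bytes()) // "hello,world"
}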
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
new file mode 100644
index 0000000..79a260d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -0,0 +1,8 @@
+// Package aws provides core functionality for making requests to AWS services.
+package aws
+
+// SDKName is the name of this AWS SDK
+const SDKName = "aws-sdk-go"
+
+// SDKVersion is the version of this SDK
+const SDKVersion = "1.1.32"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
new file mode 100644
index 0000000..2b279e6
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
@@ -0,0 +1,65 @@
+// Package endpoints validates regional endpoints for services.
+package endpoints
+
+//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
+//go:generate gofmt -s -w endpoints_map.go
+
+import (
+ "fmt"
+ "regexp"
+ "strings"
+)
+
+// NormalizeEndpoint takes an endpoint and service API information and returns a
+// normalized endpoint and signing region. If the endpoint is an empty string,
+// the service name and region will be used to look up the service's API endpoint.
+// If the endpoint is provided, the scheme will be added if it is not present.
+func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) {
+ if endpoint == "" {
+ return EndpointForRegion(serviceName, region, disableSSL)
+ }
+
+ return AddScheme(endpoint, disableSSL), ""
+}
+
+// EndpointForRegion returns an endpoint and its signing region for a service and region.
+// If the service and region pair is not found, endpoint and signingRegion will be empty.
+func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) {
+ derivedKeys := []string{
+ region + "/" + svcName,
+ region + "/*",
+ "*/" + svcName,
+ "*/*",
+ }
+
+ for _, key := range derivedKeys {
+ if val, ok := endpointsMap.Endpoints[key]; ok {
+ ep := val.Endpoint
+ ep = strings.Replace(ep, "{region}", region, -1)
+ ep = strings.Replace(ep, "{service}", svcName, -1)
+
+ endpoint = ep
+ signingRegion = val.SigningRegion
+ break
+ }
+ }
+
+ return AddScheme(endpoint, disableSSL), signingRegion
+}
+
+// Regular expression to determine if the endpoint string is prefixed with a scheme.
+var schemeRE = regexp.MustCompile("^([^:]+)://")
+
+// AddScheme adds the HTTP or HTTPS scheme to an endpoint URL if there is no
+// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS.
+func AddScheme(endpoint string, disableSSL bool) string {
+ if endpoint != "" && !schemeRE.MatchString(endpoint) {
+ scheme := "https"
+ if disableSSL {
+ scheme = "http"
+ }
+ endpoint = fmt.Sprintf("%s://%s", scheme, endpoint)
+ }
+
+ return endpoint
+}
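
A sketch of the lookup semantics (this is a private SDK package, so calling it from application code is not supported; the expected values follow the endpoints map below):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/endpoints"
)

func main() {
	// Derived keys are tried most- to least-specific:
	// region/service, region/*, */service, */*.
	ep, sr := endpoints.EndpointForRegion("iam", "us-west-2", false)
	fmt.Println(ep, sr) // https://iam.amazonaws.com us-east-1

	// A caller-supplied endpoint bypasses the lookup; only the scheme
	// is normalized, and the signing region is left empty.
	ep, sr = endpoints.NormalizeEndpoint("localhost:8000", "dynamodb", "us-west-2", true)
	fmt.Println(ep, sr) // http://localhost:8000
}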
diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
new file mode 100644
index 0000000..5f4991c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
@@ -0,0 +1,75 @@
+{
+ "version": 2,
+ "endpoints": {
+ "*/*": {
+ "endpoint": "{service}.{region}.amazonaws.com"
+ },
+ "cn-north-1/*": {
+ "endpoint": "{service}.{region}.amazonaws.com.cn",
+ "signatureVersion": "v4"
+ },
+ "cn-north-1/ec2metadata": {
+ "endpoint": "http://169.254.169.254/latest"
+ },
+ "us-gov-west-1/iam": {
+ "endpoint": "iam.us-gov.amazonaws.com"
+ },
+ "us-gov-west-1/sts": {
+ "endpoint": "sts.us-gov-west-1.amazonaws.com"
+ },
+ "us-gov-west-1/s3": {
+ "endpoint": "s3-{region}.amazonaws.com"
+ },
+ "us-gov-west-1/ec2metadata": {
+ "endpoint": "http://169.254.169.254/latest"
+ },
+ "*/cloudfront": {
+ "endpoint": "cloudfront.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/cloudsearchdomain": {
+ "endpoint": "",
+ "signingRegion": "us-east-1"
+ },
+ "*/data.iot": {
+ "endpoint": "",
+ "signingRegion": "us-east-1"
+ },
+ "*/ec2metadata": {
+ "endpoint": "http://169.254.169.254/latest"
+ },
+ "*/iam": {
+ "endpoint": "iam.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/importexport": {
+ "endpoint": "importexport.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/route53": {
+ "endpoint": "route53.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/sts": {
+ "endpoint": "sts.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/waf": {
+ "endpoint": "waf.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "us-east-1/sdb": {
+ "endpoint": "sdb.amazonaws.com",
+ "signingRegion": "us-east-1"
+ },
+ "*/s3": {
+ "endpoint": "s3-{region}.amazonaws.com"
+ },
+ "us-east-1/s3": {
+ "endpoint": "s3.amazonaws.com"
+ },
+ "eu-central-1/s3": {
+ "endpoint": "{service}.{region}.amazonaws.com"
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
new file mode 100644
index 0000000..e995315
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
@@ -0,0 +1,88 @@
+package endpoints
+
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+type endpointStruct struct {
+ Version int
+ Endpoints map[string]endpointEntry
+}
+
+type endpointEntry struct {
+ Endpoint string
+ SigningRegion string
+}
+
+var endpointsMap = endpointStruct{
+ Version: 2,
+ Endpoints: map[string]endpointEntry{
+ "*/*": {
+ Endpoint: "{service}.{region}.amazonaws.com",
+ },
+ "*/cloudfront": {
+ Endpoint: "cloudfront.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "*/cloudsearchdomain": {
+ Endpoint: "",
+ SigningRegion: "us-east-1",
+ },
+ "*/data.iot": {
+ Endpoint: "",
+ SigningRegion: "us-east-1",
+ },
+ "*/ec2metadata": {
+ Endpoint: "http://169.254.169.254/latest",
+ },
+ "*/iam": {
+ Endpoint: "iam.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "*/importexport": {
+ Endpoint: "importexport.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "*/route53": {
+ Endpoint: "route53.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "*/s3": {
+ Endpoint: "s3-{region}.amazonaws.com",
+ },
+ "*/sts": {
+ Endpoint: "sts.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "*/waf": {
+ Endpoint: "waf.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "cn-north-1/*": {
+ Endpoint: "{service}.{region}.amazonaws.com.cn",
+ },
+ "cn-north-1/ec2metadata": {
+ Endpoint: "http://169.254.169.254/latest",
+ },
+ "eu-central-1/s3": {
+ Endpoint: "{service}.{region}.amazonaws.com",
+ },
+ "us-east-1/s3": {
+ Endpoint: "s3.amazonaws.com",
+ },
+ "us-east-1/sdb": {
+ Endpoint: "sdb.amazonaws.com",
+ SigningRegion: "us-east-1",
+ },
+ "us-gov-west-1/ec2metadata": {
+ Endpoint: "http://169.254.169.254/latest",
+ },
+ "us-gov-west-1/iam": {
+ Endpoint: "iam.us-gov.amazonaws.com",
+ },
+ "us-gov-west-1/s3": {
+ Endpoint: "s3-{region}.amazonaws.com",
+ },
+ "us-gov-west-1/sts": {
+ Endpoint: "sts.us-gov-west-1.amazonaws.com",
+ },
+ },
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
new file mode 100644
index 0000000..53831df
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
@@ -0,0 +1,75 @@
+package protocol
+
+import (
+ "crypto/rand"
+ "fmt"
+ "reflect"
+)
+
+// RandReader is the random reader the protocol package will use to read
+// random bytes from. It is exported for testing and should not be used otherwise.
+var RandReader = rand.Reader
+
+const idempotencyTokenFillTag = `idempotencyToken`
+
+// CanSetIdempotencyToken returns true if the struct field should be
+// automatically populated with an idempotency token.
+//
+// Only *string and string type fields that are tagged with idempotencyToken
+// and are not already set can be auto-filled.
+func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
+ switch u := v.Interface().(type) {
+ // To auto fill an Idempotency token the field must be a string,
+ // tagged for auto fill, and have a zero value.
+ case *string:
+ return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ case string:
+ return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
+ }
+
+ return false
+}
+
+// GetIdempotencyToken returns a randomly generated idempotency token.
+func GetIdempotencyToken() string {
+ b := make([]byte, 16)
+ RandReader.Read(b)
+
+ return UUIDVersion4(b)
+}
+
+// SetIdempotencyToken will set the provided value to an idempotency token,
+// given that the value can be set. Will panic if the value is not settable.
+func SetIdempotencyToken(v reflect.Value) {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() && v.CanSet() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ v = reflect.Indirect(v)
+
+ if !v.CanSet() {
+ panic(fmt.Sprintf("unable to set idempotency token %v", v))
+ }
+
+ b := make([]byte, 16)
+ _, err := rand.Read(b)
+ if err != nil {
+ // TODO handle error
+ return
+ }
+
+ v.Set(reflect.ValueOf(UUIDVersion4(b)))
+}
+
+// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided
+func UUIDVersion4(u []byte) string {
+ // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29
+ // 13th character is "4"
+ u[6] = (u[6] | 0x40) & 0x4F
+ // 17th character is "8", "9", "a", or "b"
+ u[8] = (u[8] | 0x80) & 0xBF
+
+ return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
+}
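
For illustration, a sketch of the auto-fill contract with a hypothetical input shape (createInput is invented; the idempotencyToken tag mirrors what generated API shapes declare):

package main

import (
	"fmt"
	"reflect"

	"github.com/aws/aws-sdk-go/private/protocol"
)

// hypothetical shape whose ClientToken is eligible for auto-fill
type createInput struct {
	ClientToken *string `type:"string" idempotencyToken:"true"`
}

func main() {
	in := &createInput{}
	v := reflect.ValueOf(in).Elem()

	// The field is a nil *string tagged idempotencyToken, so it qualifies.
	if protocol.CanSetIdempotencyToken(v.Field(0), v.Type().Field(0)) {
		protocol.SetIdempotencyToken(v.Field(0))
	}
	fmt.Println(*in.ClientToken) // e.g. 6BA7B810-9DAD-41D1-80B4-00C04FD430C8
}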
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
new file mode 100644
index 0000000..56d69db
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go
@@ -0,0 +1,36 @@
+// Package query provides serialisation of AWS query requests and responses.
+package query
+
+//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
+
+import (
+ "net/url"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
+)
+
+// BuildHandler is a named request handler for building query protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build}
+
+// Build builds a request for an AWS Query service.
+func Build(r *request.Request) {
+ body := url.Values{
+ "Action": {r.Operation.Name},
+ "Version": {r.ClientInfo.APIVersion},
+ }
+ if err := queryutil.Parse(body, r.Params, false); err != nil {
+ r.Error = awserr.New("SerializationError", "failed encoding Query request", err)
+ return
+ }
+
+ if r.ExpireTime == 0 {
+ r.HTTPRequest.Method = "POST"
+ r.HTTPRequest.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8")
+ r.SetBufferBody([]byte(body.Encode()))
+ } else { // This is a pre-signed request
+ r.HTTPRequest.Method = "GET"
+ r.HTTPRequest.URL.RawQuery = body.Encode()
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
new file mode 100644
index 0000000..60ea0bd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -0,0 +1,230 @@
+package queryutil
+
+import (
+ "encoding/base64"
+ "fmt"
+ "net/url"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// Parse parses an object i and fills a url.Values object. The isEC2 flag
+// indicates if this is the EC2 Query sub-protocol.
+func Parse(body url.Values, i interface{}, isEC2 bool) error {
+ q := queryParser{isEC2: isEC2}
+ return q.parseValue(body, reflect.ValueOf(i), "", "")
+}
+
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+type queryParser struct {
+ isEC2 bool
+}
+
+func (q *queryParser) parseValue(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ value = elemOf(value)
+
+ // no need to handle zero values
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ return q.parseStruct(v, value, prefix)
+ case "list":
+ return q.parseList(v, value, prefix, tag)
+ case "map":
+ return q.parseMap(v, value, prefix, tag)
+ default:
+ return q.parseScalar(v, value, prefix, tag)
+ }
+}
+
+func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix string) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ elemValue := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ elemValue = reflect.ValueOf(token)
+ }
+
+ var name string
+ if q.isEC2 {
+ name = field.Tag.Get("queryName")
+ }
+ if name == "" {
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+ if name != "" && q.isEC2 {
+ name = strings.ToUpper(name[0:1]) + name[1:]
+ }
+ }
+ if name == "" {
+ name = field.Name
+ }
+
+ if prefix != "" {
+ name = prefix + "." + name
+ }
+
+ if err := q.parseValue(v, elemValue, name, field.Tag); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ // check for unflattened list member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ prefix += ".member"
+ }
+
+ for i := 0; i < value.Len(); i++ {
+ slicePrefix := prefix
+ if slicePrefix == "" {
+ slicePrefix = strconv.Itoa(i + 1)
+ } else {
+ slicePrefix = slicePrefix + "." + strconv.Itoa(i+1)
+ }
+ if err := q.parseValue(v, value.Index(i), slicePrefix, ""); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (q *queryParser) parseMap(v url.Values, value reflect.Value, prefix string, tag reflect.StructTag) error {
+ // If it's empty, generate an empty value
+ if !value.IsNil() && value.Len() == 0 {
+ v.Set(prefix, "")
+ return nil
+ }
+
+ // check for unflattened list member
+ if !q.isEC2 && tag.Get("flattened") == "" {
+ prefix += ".entry"
+ }
+
+ // sort keys for improved serialization consistency.
+ // this is not strictly necessary for protocol support.
+ mapKeyValues := value.MapKeys()
+ mapKeys := map[string]reflect.Value{}
+ mapKeyNames := make([]string, len(mapKeyValues))
+ for i, mapKey := range mapKeyValues {
+ name := mapKey.String()
+ mapKeys[name] = mapKey
+ mapKeyNames[i] = name
+ }
+ sort.Strings(mapKeyNames)
+
+ for i, mapKeyName := range mapKeyNames {
+ mapKey := mapKeys[mapKeyName]
+ mapValue := value.MapIndex(mapKey)
+
+ kname := tag.Get("locationNameKey")
+ if kname == "" {
+ kname = "key"
+ }
+ vname := tag.Get("locationNameValue")
+ if vname == "" {
+ vname = "value"
+ }
+
+ // serialize key
+ var keyName string
+ if prefix == "" {
+ keyName = strconv.Itoa(i+1) + "." + kname
+ } else {
+ keyName = prefix + "." + strconv.Itoa(i+1) + "." + kname
+ }
+
+ if err := q.parseValue(v, mapKey, keyName, ""); err != nil {
+ return err
+ }
+
+ // serialize value
+ var valueName string
+ if prefix == "" {
+ valueName = strconv.Itoa(i+1) + "." + vname
+ } else {
+ valueName = prefix + "." + strconv.Itoa(i+1) + "." + vname
+ }
+
+ if err := q.parseValue(v, mapValue, valueName, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (q *queryParser) parseScalar(v url.Values, r reflect.Value, name string, tag reflect.StructTag) error {
+ switch value := r.Interface().(type) {
+ case string:
+ v.Set(name, value)
+ case []byte:
+ if !r.IsNil() {
+ v.Set(name, base64.StdEncoding.EncodeToString(value))
+ }
+ case bool:
+ v.Set(name, strconv.FormatBool(value))
+ case int64:
+ v.Set(name, strconv.FormatInt(value, 10))
+ case int:
+ v.Set(name, strconv.Itoa(value))
+ case float64:
+ v.Set(name, strconv.FormatFloat(value, 'f', -1, 64))
+ case float32:
+ v.Set(name, strconv.FormatFloat(float64(value), 'f', -1, 32))
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ v.Set(name, value.UTC().Format(ISO8601UTC))
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)", name, r.Interface(), r.Type().Name())
+ }
+ return nil
+}
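
For illustration, a sketch of these rules with a hypothetical shape (sendInput and its fields are invented for the example, not a real API type): scalars serialize under their member names and non-flattened lists gain a ".member.N" suffix.

package main

import (
	"fmt"
	"net/url"

	"github.com/aws/aws-sdk-go/private/protocol/query/queryutil"
)

// hypothetical Query API input shape
type sendInput struct {
	QueueUrl *string   `type:"string"`
	Names    []*string `type:"list"`
}

func main() {
	q, a, b := "q1", "alpha", "beta"
	in := &sendInput{QueueUrl: &q, Names: []*string{&a, &b}}

	body := url.Values{}
	if err := queryutil.Parse(body, in, false); err != nil {
		panic(err)
	}
	fmt.Println(body.Encode())
	// Names.member.1=alpha&Names.member.2=beta&QueueUrl=q1
}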
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
new file mode 100644
index 0000000..a3ea409
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go
@@ -0,0 +1,35 @@
+package query
+
+//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
+
+import (
+ "encoding/xml"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling query protocol responses
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol response metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals a response for an AWS Query service.
+func Unmarshal(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ if r.DataFilled() {
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result")
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed decoding Query response", err)
+ return
+ }
+ }
+}
+
+// UnmarshalMeta unmarshals header response values for an AWS Query service.
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
new file mode 100644
index 0000000..f214296
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go
@@ -0,0 +1,66 @@
+package query
+
+import (
+ "encoding/xml"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+type xmlErrorResponse struct {
+ XMLName xml.Name `xml:"ErrorResponse"`
+ Code string `xml:"Error>Code"`
+ Message string `xml:"Error>Message"`
+ RequestID string `xml:"RequestId"`
+}
+
+type xmlServiceUnavailableResponse struct {
+ XMLName xml.Name `xml:"ServiceUnavailableException"`
+}
+
+// UnmarshalErrorHandler is a named request handler to unmarshal response errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError}
+
+// UnmarshalError unmarshals an error response for an AWS Query service.
+func UnmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+
+ bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err)
+ return
+ }
+
+ // First check for specific error
+ resp := xmlErrorResponse{}
+ decodeErr := xml.Unmarshal(bodyBytes, &resp)
+ if decodeErr == nil {
+ reqID := resp.RequestID
+ if reqID == "" {
+ reqID = r.RequestID
+ }
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(resp.Code, resp.Message, nil),
+ r.HTTPResponse.StatusCode,
+ reqID,
+ )
+ return
+ }
+
+ // Check for the ServiceUnavailableException shape, which lacks the Error wrapper
+ servUnavailResp := xmlServiceUnavailableResponse{}
+ unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp)
+ if unavailErr == nil {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("ServiceUnavailableException", "service is unavailable", nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ // Failed to retrieve any error message from the response body
+ r.Error = awserr.New("SerializationError",
+ "failed to decode query XML error response", decodeErr)
+}
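
The ErrorResponse shape can be exercised directly with encoding/xml. A sketch using a local copy of the unexported struct above (the sample body is invented):

package main

import (
	"encoding/xml"
	"fmt"
)

// local copy of xmlErrorResponse; "Error>Code" descends into the
// nested <Error> element.
type errorResponse struct {
	XMLName   xml.Name `xml:"ErrorResponse"`
	Code      string   `xml:"Error>Code"`
	Message   string   `xml:"Error>Message"`
	RequestID string   `xml:"RequestId"`
}

func main() {
	body := []byte(`<ErrorResponse>
	<Error><Code>Throttling</Code><Message>Rate exceeded</Message></Error>
	<RequestId>ab12</RequestId>
</ErrorResponse>`)

	var resp errorResponse
	if err := xml.Unmarshal(body, &resp); err != nil {
		panic(err)
	}
	fmt.Println(resp.Code, resp.Message, resp.RequestID) // Throttling Rate exceeded ab12
}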
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
new file mode 100644
index 0000000..5f41251
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -0,0 +1,256 @@
+// Package rest provides RESTful serialization of AWS requests and responses.
+package rest
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "path"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// RFC822 is the RFC 822 timestamp format used by AWS protocols
+const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
+
+// Whether the byte value can be sent without escaping in AWS URLs
+var noEscape [256]bool
+
+var errValueNotSet = fmt.Errorf("value not set")
+
+func init() {
+ for i := 0; i < len(noEscape); i++ {
+ // AWS expects every character except these to be escaped
+ noEscape[i] = (i >= 'A' && i <= 'Z') ||
+ (i >= 'a' && i <= 'z') ||
+ (i >= '0' && i <= '9') ||
+ i == '-' ||
+ i == '.' ||
+ i == '_' ||
+ i == '~'
+ }
+}
+
+// BuildHandler is a named request handler for building rest protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build}
+
+// Build builds the REST component of a service request.
+func Build(r *request.Request) {
+ if r.ParamsFilled() {
+ v := reflect.ValueOf(r.Params).Elem()
+ buildLocationElements(r, v)
+ buildBody(r, v)
+ }
+}
+
+func buildLocationElements(r *request.Request, v reflect.Value) {
+ query := r.HTTPRequest.URL.Query()
+
+ for i := 0; i < v.NumField(); i++ {
+ m := v.Field(i)
+ if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ field := v.Type().Field(i)
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+ if m.Kind() == reflect.Ptr {
+ m = m.Elem()
+ }
+ if !m.IsValid() {
+ continue
+ }
+
+ var err error
+ switch field.Tag.Get("location") {
+ case "headers": // header maps
+ err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName"))
+ case "header":
+ err = buildHeader(&r.HTTPRequest.Header, m, name)
+ case "uri":
+ err = buildURI(r.HTTPRequest.URL, m, name)
+ case "querystring":
+ err = buildQueryString(query, m, name)
+ }
+ r.Error = err
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+
+ r.HTTPRequest.URL.RawQuery = query.Encode()
+ updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path)
+}
+
+func buildBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := reflect.Indirect(v.FieldByName(payloadName))
+ if payload.IsValid() && payload.Interface() != nil {
+ switch reader := payload.Interface().(type) {
+ case io.ReadSeeker:
+ r.SetReaderBody(reader)
+ case []byte:
+ r.SetBufferBody(reader)
+ case string:
+ r.SetStringBody(reader)
+ default:
+ r.Error = awserr.New("SerializationError",
+ "failed to encode REST request",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+}
+
+func buildHeader(header *http.Header, v reflect.Value, name string) error {
+ str, err := convertType(v)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+
+ header.Add(name, str)
+
+ return nil
+}
+
+func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error {
+ for _, key := range v.MapKeys() {
+ str, err := convertType(v.MapIndex(key))
+ if err == errValueNotSet {
+ continue
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+
+ header.Add(prefix+key.String(), str)
+ }
+ return nil
+}
+
+func buildURI(u *url.URL, v reflect.Value, name string) error {
+ value, err := convertType(v)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+
+ uri := u.Path
+ uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1)
+ uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1)
+ u.Path = uri
+
+ return nil
+}
+
+func buildQueryString(query url.Values, v reflect.Value, name string) error {
+ switch value := v.Interface().(type) {
+ case []*string:
+ for _, item := range value {
+ query.Add(name, *item)
+ }
+ case map[string]*string:
+ for key, item := range value {
+ query.Add(key, *item)
+ }
+ case map[string][]*string:
+ for key, items := range value {
+ for _, item := range items {
+ query.Add(key, *item)
+ }
+ }
+ default:
+ str, err := convertType(v)
+ if err == errValueNotSet {
+ return nil
+ } else if err != nil {
+ return awserr.New("SerializationError", "failed to encode REST request", err)
+ }
+ query.Set(name, str)
+ }
+
+ return nil
+}
+
+func updatePath(url *url.URL, urlPath string) {
+ scheme, query := url.Scheme, url.RawQuery
+
+ hasSlash := strings.HasSuffix(urlPath, "/")
+
+ // clean up path
+ urlPath = path.Clean(urlPath)
+ if hasSlash && !strings.HasSuffix(urlPath, "/") {
+ urlPath += "/"
+ }
+
+ // get formatted URL minus scheme so we can build this into Opaque
+ url.Scheme, url.Path, url.RawQuery = "", "", ""
+ s := url.String()
+ url.Scheme = scheme
+ url.RawQuery = query
+
+ // build opaque URI
+ url.Opaque = s + urlPath
+}
+
+// EscapePath escapes part of a URL path in Amazon style
+func EscapePath(path string, encodeSep bool) string {
+ var buf bytes.Buffer
+ for i := 0; i < len(path); i++ {
+ c := path[i]
+ if noEscape[c] || (c == '/' && !encodeSep) {
+ buf.WriteByte(c)
+ } else {
+ fmt.Fprintf(&buf, "%%%02X", c)
+ }
+ }
+ return buf.String()
+}
+
+func convertType(v reflect.Value) (string, error) {
+ v = reflect.Indirect(v)
+ if !v.IsValid() {
+ return "", errValueNotSet
+ }
+
+ var str string
+ switch value := v.Interface().(type) {
+ case string:
+ str = value
+ case []byte:
+ str = base64.StdEncoding.EncodeToString(value)
+ case bool:
+ str = strconv.FormatBool(value)
+ case int64:
+ str = strconv.FormatInt(value, 10)
+ case float64:
+ str = strconv.FormatFloat(value, 'f', -1, 64)
+ case time.Time:
+ str = value.UTC().Format(RFC822)
+ default:
+ err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return "", err
+ }
+ return str, nil
+}
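
EscapePath is exported, so the Amazon-style escaping can be shown directly; encodeSep controls whether the "/" separator is escaped, matching the plain "{name}" and greedy "{name+}" URI placeholders handled in buildURI:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

func main() {
	// Everything outside [A-Za-z0-9-._~] is percent-encoded.
	fmt.Println(rest.EscapePath("photos/2016 summer.jpg", true))
	// photos%2F2016%20summer.jpg
	fmt.Println(rest.EscapePath("photos/2016 summer.jpg", false))
	// photos/2016%20summer.jpg
}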
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
new file mode 100644
index 0000000..4366de2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
@@ -0,0 +1,45 @@
+package rest
+
+import "reflect"
+
+// PayloadMember returns the payload field member of i if there is one, or nil.
+func PayloadMember(i interface{}) interface{} {
+ if i == nil {
+ return nil
+ }
+
+ v := reflect.ValueOf(i).Elem()
+ if !v.IsValid() {
+ return nil
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ field, _ := v.Type().FieldByName(payloadName)
+ if field.Tag.Get("type") != "structure" {
+ return nil
+ }
+
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) {
+ return payload.Interface()
+ }
+ }
+ }
+ return nil
+}
+
+// PayloadType returns the type of a payload field member of i if there is one, or "".
+func PayloadType(i interface{}) string {
+ v := reflect.Indirect(reflect.ValueOf(i))
+ if !v.IsValid() {
+ return ""
+ }
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ if member, ok := v.Type().FieldByName(payloadName); ok {
+ return member.Tag.Get("type")
+ }
+ }
+ }
+ return ""
+}
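
For illustration, a sketch of PayloadType with a hypothetical output shape (getObjectOutput is invented): the payload tag on the "_" field names the payload member, and that member's type tag tells the REST unmarshaler how to treat the response body.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/rest"
)

// hypothetical output shape with a raw (non-structure) payload
type getObjectOutput struct {
	_    struct{} `type:"structure" payload:"Body"`
	Body []byte   `type:"blob"`
}

func main() {
	fmt.Println(rest.PayloadType(&getObjectOutput{})) // blob
}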
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
new file mode 100644
index 0000000..2cba1d9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -0,0 +1,198 @@
+package rest
+
+import (
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalHandler is a named request handler for unmarshaling rest protocol responses
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol response metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// Unmarshal unmarshals the REST component of a response in a REST service.
+func Unmarshal(r *request.Request) {
+ if r.DataFilled() {
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ unmarshalBody(r, v)
+ }
+}
+
+// UnmarshalMeta unmarshals the REST metadata of a response in a REST service
+func UnmarshalMeta(r *request.Request) {
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
+ if r.RequestID == "" {
+ // Alternative version of request id in the header
+ r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
+ }
+ if r.DataFilled() {
+ v := reflect.Indirect(reflect.ValueOf(r.Data))
+ unmarshalLocationElements(r, v)
+ }
+}
+
+func unmarshalBody(r *request.Request, v reflect.Value) {
+ if field, ok := v.Type().FieldByName("_"); ok {
+ if payloadName := field.Tag.Get("payload"); payloadName != "" {
+ pfield, _ := v.Type().FieldByName(payloadName)
+ if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
+ payload := v.FieldByName(payloadName)
+ if payload.IsValid() {
+ switch payload.Interface().(type) {
+ case []byte:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ } else {
+ payload.Set(reflect.ValueOf(b))
+ }
+ case *string:
+ defer r.HTTPResponse.Body.Close()
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ } else {
+ str := string(b)
+ payload.Set(reflect.ValueOf(&str))
+ }
+ default:
+ switch payload.Type().String() {
+ case "io.ReadSeeker":
+ payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body)))
+ case "aws.ReadSeekCloser", "io.ReadCloser":
+ payload.Set(reflect.ValueOf(r.HTTPResponse.Body))
+ default:
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ defer r.HTTPResponse.Body.Close()
+ r.Error = awserr.New("SerializationError",
+ "failed to decode REST response",
+ fmt.Errorf("unknown payload type %s", payload.Type()))
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+func unmarshalLocationElements(r *request.Request, v reflect.Value) {
+ for i := 0; i < v.NumField(); i++ {
+ m, field := v.Field(i), v.Type().Field(i)
+ if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) {
+ continue
+ }
+
+ if m.IsValid() {
+ name := field.Tag.Get("locationName")
+ if name == "" {
+ name = field.Name
+ }
+
+ switch field.Tag.Get("location") {
+ case "statusCode":
+ unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
+ case "header":
+ err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name))
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ break
+ }
+ case "headers":
+ prefix := field.Tag.Get("locationName")
+ err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
+ break
+ }
+ }
+ }
+ if r.Error != nil {
+ return
+ }
+ }
+}
+
+func unmarshalStatusCode(v reflect.Value, statusCode int) {
+ if !v.IsValid() {
+ return
+ }
+
+ switch v.Interface().(type) {
+ case *int64:
+ s := int64(statusCode)
+ v.Set(reflect.ValueOf(&s))
+ }
+}
+
+func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error {
+ switch r.Interface().(type) {
+ case map[string]*string: // we only support string map value types
+ out := map[string]*string{}
+ for k, v := range headers {
+ k = http.CanonicalHeaderKey(k)
+ if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) {
+ out[k[len(prefix):]] = &v[0]
+ }
+ }
+ r.Set(reflect.ValueOf(out))
+ }
+ return nil
+}
+
+func unmarshalHeader(v reflect.Value, header string) error {
+ if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+ return nil
+ }
+
+ switch v.Interface().(type) {
+ case *string:
+ v.Set(reflect.ValueOf(&header))
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(b))
+ case *bool:
+ b, err := strconv.ParseBool(header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&b))
+ case *int64:
+ i, err := strconv.ParseInt(header, 10, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&i))
+ case *float64:
+ f, err := strconv.ParseFloat(header, 64)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&f))
+ case *time.Time:
+ t, err := time.Parse(RFC822, header)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(&t))
+ default:
+ err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
+ return err
+ }
+ return nil
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
new file mode 100644
index 0000000..c74088b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go
@@ -0,0 +1,69 @@
+// Package restxml provides RESTful XML serialisation of AWS
+// requests and responses.
+package restxml
+
+//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go
+//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
+
+import (
+ "bytes"
+ "encoding/xml"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/query"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+ "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
+)
+
+// BuildHandler is a named request handler for building restxml protocol requests
+var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build}
+
+// UnmarshalHandler is a named request handler for unmarshaling restxml protocol responses
+var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal}
+
+// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol response metadata
+var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta}
+
+// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol response errors
+var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError}
+
+// Build builds a request payload for the REST XML protocol.
+func Build(r *request.Request) {
+ rest.Build(r)
+
+ if t := rest.PayloadType(r.Params); t == "structure" || t == "" {
+ var buf bytes.Buffer
+ err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf))
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err)
+ return
+ }
+ r.SetBufferBody(buf.Bytes())
+ }
+}
+
+// Unmarshal unmarshals a payload response for the REST XML protocol.
+func Unmarshal(r *request.Request) {
+ if t := rest.PayloadType(r.Data); t == "structure" || t == "" {
+ defer r.HTTPResponse.Body.Close()
+ decoder := xml.NewDecoder(r.HTTPResponse.Body)
+ err := xmlutil.UnmarshalXML(r.Data, decoder, "")
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err)
+ return
+ }
+ } else {
+ rest.Unmarshal(r)
+ }
+}
+
+// UnmarshalMeta unmarshals response headers for the REST XML protocol.
+func UnmarshalMeta(r *request.Request) {
+ rest.UnmarshalMeta(r)
+}
+
+// UnmarshalError unmarshals a response error for the REST XML protocol.
+func UnmarshalError(r *request.Request) {
+ query.UnmarshalError(r)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
new file mode 100644
index 0000000..da1a681
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
@@ -0,0 +1,21 @@
+package protocol
+
+import (
+ "io"
+ "io/ioutil"
+
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body
+var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody}
+
+// UnmarshalDiscardBody is a request handler to empty a response's body and close it.
+func UnmarshalDiscardBody(r *request.Request) {
+ if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
+ return
+ }
+
+ io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ r.HTTPResponse.Body.Close()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
new file mode 100644
index 0000000..ceb4132
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -0,0 +1,293 @@
+// Package xmlutil provides XML serialisation of AWS requests and responses.
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "reflect"
+ "sort"
+ "strconv"
+ "time"
+
+ "github.com/aws/aws-sdk-go/private/protocol"
+)
+
+// BuildXML will serialize params into an xml.Encoder.
+// Error will be returned if the serialization of any of the params or nested values fails.
+func BuildXML(params interface{}, e *xml.Encoder) error {
+ b := xmlBuilder{encoder: e, namespaces: map[string]string{}}
+ root := NewXMLElement(xml.Name{})
+ if err := b.buildValue(reflect.ValueOf(params), root, ""); err != nil {
+ return err
+ }
+ for _, c := range root.Children {
+ for _, v := range c {
+ return StructToXML(e, v, false)
+ }
+ }
+ return nil
+}
+
+// elemOf returns the underlying element of a value, unwrapping any pointers.
+func elemOf(value reflect.Value) reflect.Value {
+ for value.Kind() == reflect.Ptr {
+ value = value.Elem()
+ }
+ return value
+}
+
+// An xmlBuilder serializes values from Go code to XML
+type xmlBuilder struct {
+ encoder *xml.Encoder
+ namespaces map[string]string
+}
+
+// buildValue is a generic XMLNode builder for any type. It builds the value according to its
+// specific type: struct, list, map, or scalar.
+//
+// Also takes a "type" tag value to set what type a value should be converted to an XMLNode as.
+// If the type is not provided, reflection will be used to determine the value's type.
+func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ value = elemOf(value)
+ if !value.IsValid() { // no need to handle zero values
+ return nil
+ } else if tag.Get("location") != "" { // don't handle non-body location values
+ return nil
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch value.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := value.Type().FieldByName("_"); ok {
+ tag = tag + reflect.StructTag(" ") + field.Tag
+ }
+ return b.buildStruct(value, current, tag)
+ case "list":
+ return b.buildList(value, current, tag)
+ case "map":
+ return b.buildMap(value, current, tag)
+ default:
+ return b.buildScalar(value, current, tag)
+ }
+}
+
+// buildStruct adds a struct and its fields to the current XMLNode. All fields and any nested
+// types are converted to XMLNodes as well.
+func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if !value.IsValid() {
+ return nil
+ }
+
+ fieldAdded := false
+
+ // unwrap payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := value.Type().FieldByName(payload)
+ tag = field.Tag
+ value = elemOf(value.FieldByName(payload))
+
+ if !value.IsValid() {
+ return nil
+ }
+ }
+
+ child := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+
+ // there is an xmlNamespace associated with this struct
+ if prefix, uri := tag.Get("xmlPrefix"), tag.Get("xmlURI"); uri != "" {
+ ns := xml.Attr{
+ Name: xml.Name{Local: "xmlns"},
+ Value: uri,
+ }
+ if prefix != "" {
+ b.namespaces[prefix] = uri // register the namespace
+ ns.Name.Local = "xmlns:" + prefix
+ }
+
+ child.Attr = append(child.Attr, ns)
+ }
+
+ t := value.Type()
+ for i := 0; i < value.NumField(); i++ {
+ member := elemOf(value.Field(i))
+ field := t.Field(i)
+
+ if field.PkgPath != "" {
+ continue // ignore unexported fields
+ }
+
+ mTag := field.Tag
+ if mTag.Get("location") != "" { // skip non-body members
+ continue
+ }
+
+ if protocol.CanSetIdempotencyToken(value.Field(i), field) {
+ token := protocol.GetIdempotencyToken()
+ member = reflect.ValueOf(token)
+ }
+
+ memberName := mTag.Get("locationName")
+ if memberName == "" {
+ memberName = field.Name
+ mTag = reflect.StructTag(string(mTag) + ` locationName:"` + memberName + `"`)
+ }
+ if err := b.buildValue(member, child, mTag); err != nil {
+ return err
+ }
+
+ fieldAdded = true
+ }
+
+ if fieldAdded { // only append this child if we have one or more valid members
+ current.AddChild(child)
+ }
+
+ return nil
+}
+
+// buildList adds the value's list items to the current XMLNode as child nodes. All
+// nested values in the list are converted to XMLNodes as well.
+func (b *xmlBuilder) buildList(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted lists
+ return nil
+ }
+
+ // check for unflattened list member
+ flattened := tag.Get("flattened") != ""
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if flattened {
+ for i := 0; i < value.Len(); i++ {
+ child := NewXMLElement(xname)
+ current.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ } else {
+ list := NewXMLElement(xname)
+ current.AddChild(list)
+
+ for i := 0; i < value.Len(); i++ {
+ iname := tag.Get("locationNameList")
+ if iname == "" {
+ iname = "member"
+ }
+
+ child := NewXMLElement(xml.Name{Local: iname})
+ list.AddChild(child)
+ if err := b.buildValue(value.Index(i), child, ""); err != nil {
+ return err
+ }
+ }
+ }
+
+ return nil
+}
+
+// buildMap adds the value's key/value pairs to the current XMLNode as child nodes. All
+// nested values in the map are converted to XMLNodes as well.
+//
+// Error will be returned if it is unable to build the map's values into XMLNodes
+func (b *xmlBuilder) buildMap(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ if value.IsNil() { // don't build omitted maps
+ return nil
+ }
+
+ maproot := NewXMLElement(xml.Name{Local: tag.Get("locationName")})
+ current.AddChild(maproot)
+ current = maproot
+
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ // sorting is not required for compliance, but it makes testing easier
+ keys := make([]string, value.Len())
+ for i, k := range value.MapKeys() {
+ keys[i] = k.String()
+ }
+ sort.Strings(keys)
+
+ for _, k := range keys {
+ v := value.MapIndex(reflect.ValueOf(k))
+
+ mapcur := current
+ if tag.Get("flattened") == "" { // add "entry" tag to non-flat maps
+ child := NewXMLElement(xml.Name{Local: "entry"})
+ mapcur.AddChild(child)
+ mapcur = child
+ }
+
+ kchild := NewXMLElement(xml.Name{Local: kname})
+ kchild.Text = k
+ vchild := NewXMLElement(xml.Name{Local: vname})
+ mapcur.AddChild(kchild)
+ mapcur.AddChild(vchild)
+
+ if err := b.buildValue(v, vchild, ""); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// buildScalar will convert the value into a string and append it as an attribute or child
+// of the current XMLNode.
+//
+// The value will be added as an attribute if the tag contains an "xmlAttribute" attribute value.
+//
+// Error will be returned if the value type is unsupported.
+func (b *xmlBuilder) buildScalar(value reflect.Value, current *XMLNode, tag reflect.StructTag) error {
+ var str string
+ switch converted := value.Interface().(type) {
+ case string:
+ str = converted
+ case []byte:
+ if !value.IsNil() {
+ str = base64.StdEncoding.EncodeToString(converted)
+ }
+ case bool:
+ str = strconv.FormatBool(converted)
+ case int64:
+ str = strconv.FormatInt(converted, 10)
+ case int:
+ str = strconv.Itoa(converted)
+ case float64:
+ str = strconv.FormatFloat(converted, 'f', -1, 64)
+ case float32:
+ str = strconv.FormatFloat(float64(converted), 'f', -1, 32)
+ case time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ str = converted.UTC().Format(ISO8601UTC)
+ default:
+ return fmt.Errorf("unsupported value for param %s: %v (%s)",
+ tag.Get("locationName"), value.Interface(), value.Type().Name())
+ }
+
+ xname := xml.Name{Local: tag.Get("locationName")}
+ if tag.Get("xmlAttribute") != "" { // put into current node's attribute list
+ attr := xml.Attr{Name: xname, Value: str}
+ current.Attr = append(current.Attr, attr)
+ } else { // regular text node
+ current.AddChild(&XMLNode{Name: xname, Text: str})
+ }
+ return nil
+}
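
For illustration, a sketch of BuildXML with a hypothetical shape (grant is invented): the locationName on the "_" field names the root element, and each member becomes a child node.

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// hypothetical shape; tags mirror what generated API shapes declare
type grant struct {
	_          struct{} `type:"structure" locationName:"Grant"`
	Permission *string  `type:"string" locationName:"Permission"`
}

func main() {
	perm := "READ"
	var buf bytes.Buffer
	if err := xmlutil.BuildXML(&grant{Permission: &perm}, xml.NewEncoder(&buf)); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // <Grant><Permission>READ</Permission></Grant>
}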
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
new file mode 100644
index 0000000..49f291a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -0,0 +1,260 @@
+package xmlutil
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "fmt"
+ "io"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// UnmarshalXML deserializes an xml.Decoder into the container v. The
+// container needs to match the shape of the XML expected to be decoded.
+// If the shape doesn't match, unmarshaling will fail.
+func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
+ n, _ := XMLToStruct(d, nil)
+ if n.Children != nil {
+ for _, root := range n.Children {
+ for _, c := range root {
+ if wrappedChild, ok := c.Children[wrapper]; ok {
+ c = wrappedChild[0] // pull out wrapped element
+ }
+
+ err := parse(reflect.ValueOf(v), c, "")
+ if err != nil {
+ if err == io.EOF {
+ return nil
+ }
+ return err
+ }
+ }
+ }
+ return nil
+ }
+ return nil
+}
+
+// parse deserializes any value from the XMLNode. The type tag is used to infer the type, or
+// reflection will be used to determine the type from r.
+func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ rtype := r.Type()
+ if rtype.Kind() == reflect.Ptr {
+ rtype = rtype.Elem() // check kind of actual element type
+ }
+
+ t := tag.Get("type")
+ if t == "" {
+ switch rtype.Kind() {
+ case reflect.Struct:
+ t = "structure"
+ case reflect.Slice:
+ t = "list"
+ case reflect.Map:
+ t = "map"
+ }
+ }
+
+ switch t {
+ case "structure":
+ if field, ok := rtype.FieldByName("_"); ok {
+ tag = field.Tag
+ }
+ return parseStruct(r, node, tag)
+ case "list":
+ return parseList(r, node, tag)
+ case "map":
+ return parseMap(r, node, tag)
+ default:
+ return parseScalar(r, node, tag)
+ }
+}
+
+// parseStruct deserializes a structure and its fields from an XMLNode. Any nested
+// types in the structure will also be deserialized.
+func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+ if r.Kind() == reflect.Ptr {
+ if r.IsNil() { // create the structure if it's nil
+ s := reflect.New(r.Type().Elem())
+ r.Set(s)
+ r = s
+ }
+
+ r = r.Elem()
+ t = t.Elem()
+ }
+
+ // unwrap any payloads
+ if payload := tag.Get("payload"); payload != "" {
+ field, _ := t.FieldByName(payload)
+ return parseStruct(r.FieldByName(payload), node, field.Tag)
+ }
+
+ for i := 0; i < t.NumField(); i++ {
+ field := t.Field(i)
+ if c := field.Name[0:1]; strings.ToLower(c) == c {
+ continue // ignore unexported fields
+ }
+
+ // figure out what this field is called
+ name := field.Name
+ if field.Tag.Get("flattened") != "" && field.Tag.Get("locationNameList") != "" {
+ name = field.Tag.Get("locationNameList")
+ } else if locName := field.Tag.Get("locationName"); locName != "" {
+ name = locName
+ }
+
+ // try to find the field by name in elements
+ elems := node.Children[name]
+
+ if elems == nil { // try to find the field in attributes
+ for _, a := range node.Attr {
+ if name == a.Name.Local {
+ // turn this into a text node for de-serializing
+ elems = []*XMLNode{{Text: a.Value}}
+ }
+ }
+ }
+
+ member := r.FieldByName(field.Name)
+ for _, elem := range elems {
+ err := parse(member, elem, field.Tag)
+ if err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+// parseList deserializes a list of values from an XML node. Each list entry
+// will also be deserialized.
+func parseList(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ t := r.Type()
+
+ if tag.Get("flattened") == "" { // look at all item entries
+ mname := "member"
+ if name := tag.Get("locationNameList"); name != "" {
+ mname = name
+ }
+
+ if children, ok := node.Children[mname]; ok {
+ if r.IsNil() {
+ r.Set(reflect.MakeSlice(t, len(children), len(children)))
+ }
+
+ for i, c := range children {
+ err := parse(r.Index(i), c, "")
+ if err != nil {
+ return err
+ }
+ }
+ }
+ } else { // flattened list means this is a single element
+ if r.IsNil() {
+ r.Set(reflect.MakeSlice(t, 0, 0))
+ }
+
+ childR := reflect.Zero(t.Elem())
+ r.Set(reflect.Append(r, childR))
+ err := parse(r.Index(r.Len()-1), node, "")
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// parseMap deserializes a map from an XMLNode. The direct children of the XMLNode
+// will also be deserialized as map entries.
+func parseMap(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ if r.IsNil() {
+ r.Set(reflect.MakeMap(r.Type()))
+ }
+
+ if tag.Get("flattened") == "" { // look at all child entries
+ for _, entry := range node.Children["entry"] {
+ parseMapEntry(r, entry, tag)
+ }
+ } else { // this element is itself an entry
+ parseMapEntry(r, node, tag)
+ }
+
+ return nil
+}
+
+// parseMapEntry deserializes a map entry from an XML node.
+func parseMapEntry(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ kname, vname := "key", "value"
+ if n := tag.Get("locationNameKey"); n != "" {
+ kname = n
+ }
+ if n := tag.Get("locationNameValue"); n != "" {
+ vname = n
+ }
+
+ keys, ok := node.Children[kname]
+ values := node.Children[vname]
+ if ok {
+ for i, key := range keys {
+ keyR := reflect.ValueOf(key.Text)
+ value := values[i]
+ valueR := reflect.New(r.Type().Elem()).Elem()
+
+ if err := parse(valueR, value, ""); err != nil {
+ return err
+ }
+ r.SetMapIndex(keyR, valueR)
+ }
+ }
+ return nil
+}
+
+// parseScalar deserializes an XMLNode value into a concrete type based on the
+// interface type of r.
+//
+// Error is returned if the deserialization fails due to invalid type conversion,
+// or unsupported interface type.
+func parseScalar(r reflect.Value, node *XMLNode, tag reflect.StructTag) error {
+ switch r.Interface().(type) {
+ case *string:
+ r.Set(reflect.ValueOf(&node.Text))
+ return nil
+ case []byte:
+ b, err := base64.StdEncoding.DecodeString(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(b))
+ case *bool:
+ v, err := strconv.ParseBool(node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *int64:
+ v, err := strconv.ParseInt(node.Text, 10, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *float64:
+ v, err := strconv.ParseFloat(node.Text, 64)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&v))
+ case *time.Time:
+ const ISO8601UTC = "2006-01-02T15:04:05Z"
+ t, err := time.Parse(ISO8601UTC, node.Text)
+ if err != nil {
+ return err
+ }
+ r.Set(reflect.ValueOf(&t))
+ default:
+ return fmt.Errorf("unsupported value: %v (%s)", r.Interface(), r.Type())
+ }
+ return nil
+}
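
For illustration, a sketch of UnmarshalXML with a hypothetical result shape; the wrapper argument mirrors how the query protocol pulls the OperationNameResult element out of the response envelope (the element and field names here are invented).

package main

import (
	"encoding/xml"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

// hypothetical result shape
type getResult struct {
	_    struct{} `type:"structure"`
	Name *string  `type:"string" locationName:"Name"`
}

func main() {
	body := `<GetResponse><GetResult><Name>cashier</Name></GetResult></GetResponse>`
	d := xml.NewDecoder(strings.NewReader(body))

	var out getResult
	if err := xmlutil.UnmarshalXML(&out, d, "GetResult"); err != nil {
		panic(err)
	}
	fmt.Println(*out.Name) // cashier
}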
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
new file mode 100644
index 0000000..72c198a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -0,0 +1,105 @@
+package xmlutil
+
+import (
+ "encoding/xml"
+ "io"
+ "sort"
+)
+
+// An XMLNode contains the values to be encoded or decoded.
+type XMLNode struct {
+ Name xml.Name `json:",omitempty"`
+ Children map[string][]*XMLNode `json:",omitempty"`
+ Text string `json:",omitempty"`
+ Attr []xml.Attr `json:",omitempty"`
+}
+
+// NewXMLElement returns a pointer to a new XMLNode initialized to default values.
+func NewXMLElement(name xml.Name) *XMLNode {
+ return &XMLNode{
+ Name: name,
+ Children: map[string][]*XMLNode{},
+ Attr: []xml.Attr{},
+ }
+}
+
+// AddChild adds child to the XMLNode.
+func (n *XMLNode) AddChild(child *XMLNode) {
+ if _, ok := n.Children[child.Name.Local]; !ok {
+ n.Children[child.Name.Local] = []*XMLNode{}
+ }
+ n.Children[child.Name.Local] = append(n.Children[child.Name.Local], child)
+}
+
+// XMLToStruct converts an xml.Decoder stream to an XMLNode with nested values.
+func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
+ out := &XMLNode{}
+ for {
+ tok, err := d.Token()
+ if tok == nil || err == io.EOF {
+ break
+ }
+ if err != nil {
+ return out, err
+ }
+
+ switch typed := tok.(type) {
+ case xml.CharData:
+ out.Text = string(typed.Copy())
+ case xml.StartElement:
+ el := typed.Copy()
+ out.Attr = el.Attr
+ if out.Children == nil {
+ out.Children = map[string][]*XMLNode{}
+ }
+
+ name := typed.Name.Local
+ slice := out.Children[name]
+ if slice == nil {
+ slice = []*XMLNode{}
+ }
+ node, e := XMLToStruct(d, &el)
+ if e != nil {
+ return out, e
+ }
+ node.Name = typed.Name
+ slice = append(slice, node)
+ out.Children[name] = slice
+ case xml.EndElement:
+ if s != nil && s.Name.Local == typed.Name.Local { // matching end token
+ return out, nil
+ }
+ }
+ }
+ return out, nil
+}
+
+// StructToXML writes an XMLNode to an xml.Encoder as tokens.
+func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error {
+ e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr})
+
+ if node.Text != "" {
+ e.EncodeToken(xml.CharData([]byte(node.Text)))
+ } else if sorted {
+ sortedNames := []string{}
+ for k := range node.Children {
+ sortedNames = append(sortedNames, k)
+ }
+ sort.Strings(sortedNames)
+
+ for _, k := range sortedNames {
+ for _, v := range node.Children[k] {
+ StructToXML(e, v, sorted)
+ }
+ }
+ } else {
+ for _, c := range node.Children {
+ for _, v := range c {
+ StructToXML(e, v, sorted)
+ }
+ }
+ }
+
+ e.EncodeToken(xml.EndElement{Name: node.Name})
+ return e.Flush()
+}
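
XMLToStruct and StructToXML round-trip arbitrary XML through the XMLNode tree; with sorted=true the re-encoded child order is deterministic regardless of Go map iteration order. A minimal sketch:

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
)

func main() {
	in := `<Person><b>2</b><a>1</a></Person>`
	node, err := xmlutil.XMLToStruct(xml.NewDecoder(strings.NewReader(in)), nil)
	if err != nil {
		panic(err)
	}

	var buf bytes.Buffer
	// The top-level node wraps the document; pull out the real root.
	root := node.Children["Person"][0]
	if err := xmlutil.StructToXML(xml.NewEncoder(&buf), root, true); err != nil {
		panic(err)
	}
	fmt.Println(buf.String()) // <Person><a>1</a><b>2</b></Person>
}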
diff --git a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go
new file mode 100644
index 0000000..244c86d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go
@@ -0,0 +1,82 @@
+package v4
+
+import (
+ "net/http"
+ "strings"
+)
+
+// rules houses a set of rules needed for validation of a
+// string value
+type rules []rule
+
+// The rule interface allows for more flexible rules; a rule simply
+// checks whether or not a value adheres to it
+type rule interface {
+ IsValid(value string) bool
+}
+
+// IsValid iterates through all rules and reports whether any rule
+// applies to the value; nested rules are supported
+func (r rules) IsValid(value string) bool {
+ for _, rule := range r {
+ if rule.IsValid(value) {
+ return true
+ }
+ }
+ return false
+}
+
+// mapRule is a generic rule for maps
+type mapRule map[string]struct{}
+
+// IsValid for the map rule reports whether the value exists in the map
+func (m mapRule) IsValid(value string) bool {
+ _, ok := m[value]
+ return ok
+}
+
+// whitelist is a generic rule for whitelisting
+type whitelist struct {
+ rule
+}
+
+// IsValid for whitelist checks if the value is within the whitelist
+func (w whitelist) IsValid(value string) bool {
+ return w.rule.IsValid(value)
+}
+
+// blacklist is a generic rule for blacklisting
+type blacklist struct {
+ rule
+}
+
+// IsValid for blacklist checks that the value is not within the wrapped rule
+func (b blacklist) IsValid(value string) bool {
+ return !b.rule.IsValid(value)
+}
+
+type patterns []string
+
+// IsValid for patterns checks each pattern and returns whether a match has
+// been found
+func (p patterns) IsValid(value string) bool {
+ for _, pattern := range p {
+ if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
+ return true
+ }
+ }
+ return false
+}
+
+// inclusiveRules allows rules to depend on one another
+type inclusiveRules []rule
+
+// IsValid will return true if all rules are true
+func (r inclusiveRules) IsValid(value string) bool {
+ for _, rule := range r {
+ if !rule.IsValid(value) {
+ return false
+ }
+ }
+ return true
+}
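
Because these rule types are unexported, the sketch below uses a trimmed local copy to show the composition: blacklist wraps another rule and inverts it, and a rules slice ORs its members (this mirrors how the ignoredHeaders list in v4.go is built).

package main

import "fmt"

// trimmed local copies of the unexported rule types above
type rule interface{ IsValid(string) bool }

type mapRule map[string]struct{}

func (m mapRule) IsValid(v string) bool { _, ok := m[v]; return ok }

type blacklist struct{ rule }

func (b blacklist) IsValid(v string) bool { return !b.rule.IsValid(v) }

type rules []rule

func (r rules) IsValid(v string) bool {
	for _, ru := range r {
		if ru.IsValid(v) {
			return true
		}
	}
	return false
}

func main() {
	ignored := rules{blacklist{mapRule{"Authorization": {}, "User-Agent": {}}}}
	fmt.Println(ignored.IsValid("Content-Type"))  // true: eligible for signing
	fmt.Println(ignored.IsValid("Authorization")) // false: excluded from signing
}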
diff --git a/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
new file mode 100644
index 0000000..4765800
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
@@ -0,0 +1,465 @@
+// Package v4 implements the AWS Signature Version 4 signer.
+package v4
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/hex"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/credentials"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/rest"
+)
+
+const (
+ authHeaderPrefix = "AWS4-HMAC-SHA256"
+ timeFormat = "20060102T150405Z"
+ shortTimeFormat = "20060102"
+)
+
+var ignoredHeaders = rules{
+ blacklist{
+ mapRule{
+ "Authorization": struct{}{},
+ "User-Agent": struct{}{},
+ },
+ },
+}
+
+// requiredSignedHeaders is a whitelist for building canonical headers.
+var requiredSignedHeaders = rules{
+ whitelist{
+ mapRule{
+ "Cache-Control": struct{}{},
+ "Content-Disposition": struct{}{},
+ "Content-Encoding": struct{}{},
+ "Content-Language": struct{}{},
+ "Content-Md5": struct{}{},
+ "Content-Type": struct{}{},
+ "Expires": struct{}{},
+ "If-Match": struct{}{},
+ "If-Modified-Since": struct{}{},
+ "If-None-Match": struct{}{},
+ "If-Unmodified-Since": struct{}{},
+ "Range": struct{}{},
+ "X-Amz-Acl": struct{}{},
+ "X-Amz-Copy-Source": struct{}{},
+ "X-Amz-Copy-Source-If-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Modified-Since": struct{}{},
+ "X-Amz-Copy-Source-If-None-Match": struct{}{},
+ "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{},
+ "X-Amz-Copy-Source-Range": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Grant-Full-control": struct{}{},
+ "X-Amz-Grant-Read": struct{}{},
+ "X-Amz-Grant-Read-Acp": struct{}{},
+ "X-Amz-Grant-Write": struct{}{},
+ "X-Amz-Grant-Write-Acp": struct{}{},
+ "X-Amz-Metadata-Directive": struct{}{},
+ "X-Amz-Mfa": struct{}{},
+ "X-Amz-Request-Payer": struct{}{},
+ "X-Amz-Server-Side-Encryption": struct{}{},
+ "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{},
+ "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{},
+ "X-Amz-Storage-Class": struct{}{},
+ "X-Amz-Website-Redirect-Location": struct{}{},
+ },
+ },
+ patterns{"X-Amz-Meta-"},
+}
+
+// allowedQueryHoisting is a whitelist of headers that may be hoisted into
+// the presigned request's query string.
+var allowedQueryHoisting = inclusiveRules{
+ blacklist{requiredSignedHeaders},
+ patterns{"X-Amz-"},
+}
+
+type signer struct {
+ Request *http.Request
+ Time time.Time
+ ExpireTime time.Duration
+ ServiceName string
+ Region string
+ CredValues credentials.Value
+ Credentials *credentials.Credentials
+ Query url.Values
+ Body io.ReadSeeker
+ Debug aws.LogLevelType
+ Logger aws.Logger
+
+ isPresign bool
+ formattedTime string
+ formattedShortTime string
+
+ signedHeaders string
+ canonicalHeaders string
+ canonicalString string
+ credentialString string
+ stringToSign string
+ signature string
+ authorization string
+ notHoist bool
+ signedHeaderVals http.Header
+}
+
+// Sign requests with signature version 4.
+//
+// Will sign the requests with the service config's Credentials object.
+// Signing is skipped if the credentials object is
+// credentials.AnonymousCredentials.
+func Sign(req *request.Request) {
+ // Skip signing if the AnonymousCredentials object is used; such
+ // requests do not need to be signed.
+ if req.Config.Credentials == credentials.AnonymousCredentials {
+ return
+ }
+
+ region := req.ClientInfo.SigningRegion
+ if region == "" {
+ region = aws.StringValue(req.Config.Region)
+ }
+
+ name := req.ClientInfo.SigningName
+ if name == "" {
+ name = req.ClientInfo.ServiceName
+ }
+
+ s := signer{
+ Request: req.HTTPRequest,
+ Time: req.Time,
+ ExpireTime: req.ExpireTime,
+ Query: req.HTTPRequest.URL.Query(),
+ Body: req.Body,
+ ServiceName: name,
+ Region: region,
+ Credentials: req.Config.Credentials,
+ Debug: req.Config.LogLevel.Value(),
+ Logger: req.Config.Logger,
+ notHoist: req.NotHoist,
+ }
+
+ req.Error = s.sign()
+ req.Time = s.Time
+ req.SignedHeaderVals = s.signedHeaderVals
+}
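+
+// Illustrative sketch (an assumption, not part of this file): service
+// clients register Sign as a step in the request handler chain, so every
+// request is signed just before it is sent, e.g.:
+//
+//    svc.Handlers.Sign.PushBack(v4.Sign)
+//
+// Presigning is triggered by setting a non-zero ExpireTime on the request
+// before the Sign handlers run.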
+
+func (v4 *signer) sign() error {
+ if v4.ExpireTime != 0 {
+ v4.isPresign = true
+ }
+
+ if v4.isRequestSigned() {
+ if !v4.Credentials.IsExpired() && time.Now().Before(v4.Time.Add(10*time.Minute)) {
+ // If the request is already signed, the credentials have not
+ // expired, and the request is not too old, skip re-signing.
+ return nil
+ }
+ v4.Time = time.Now()
+
+ // The credentials have expired for this request. The current signature
+ // is invalid and the request must be re-signed, otherwise it will fail.
+ if v4.isPresign {
+ v4.removePresign()
+ // Update the request's query string to ensure the values stay in
+ // sync in case retrieving the new credentials fails.
+ v4.Request.URL.RawQuery = v4.Query.Encode()
+ }
+ }
+
+ var err error
+ v4.CredValues, err = v4.Credentials.Get()
+ if err != nil {
+ return err
+ }
+
+ if v4.isPresign {
+ v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix)
+ if v4.CredValues.SessionToken != "" {
+ v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
+ } else {
+ v4.Query.Del("X-Amz-Security-Token")
+ }
+ } else if v4.CredValues.SessionToken != "" {
+ v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken)
+ }
+
+ v4.build()
+
+ if v4.Debug.Matches(aws.LogDebugWithSigning) {
+ v4.logSigningInfo()
+ }
+
+ return nil
+}
+
+const logSignInfoMsg = `DEBUG: Request Signature:
+---[ CANONICAL STRING ]-----------------------------
+%s
+---[ STRING TO SIGN ]--------------------------------
+%s%s
+-----------------------------------------------------`
+const logSignedURLMsg = `
+---[ SIGNED URL ]------------------------------------
+%s`
+
+func (v4 *signer) logSigningInfo() {
+ signedURLMsg := ""
+ if v4.isPresign {
+ signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String())
+ }
+ msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg)
+ v4.Logger.Log(msg)
+}
+
+func (v4 *signer) build() {
+
+ v4.buildTime() // no depends
+ v4.buildCredentialString() // no depends
+
+ unsignedHeaders := v4.Request.Header
+ if v4.isPresign {
+ if !v4.notHoist {
+ urlValues := url.Values{}
+ urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends
+ for k := range urlValues {
+ v4.Query[k] = urlValues[k]
+ }
+ }
+ }
+
+ v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
+ v4.buildCanonicalString() // depends on canon headers / signed headers
+ v4.buildStringToSign() // depends on canon string
+ v4.buildSignature() // depends on string to sign
+
+ if v4.isPresign {
+ v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature
+ } else {
+ parts := []string{
+ authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString,
+ "SignedHeaders=" + v4.signedHeaders,
+ "Signature=" + v4.signature,
+ }
+ v4.Request.Header.Set("Authorization", strings.Join(parts, ", "))
+ }
+}
+
+func (v4 *signer) buildTime() {
+ v4.formattedTime = v4.Time.UTC().Format(timeFormat)
+ v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat)
+
+ if v4.isPresign {
+ duration := int64(v4.ExpireTime / time.Second)
+ v4.Query.Set("X-Amz-Date", v4.formattedTime)
+ v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10))
+ } else {
+ v4.Request.Header.Set("X-Amz-Date", v4.formattedTime)
+ }
+}
+
+func (v4 *signer) buildCredentialString() {
+ v4.credentialString = strings.Join([]string{
+ v4.formattedShortTime,
+ v4.Region,
+ v4.ServiceName,
+ "aws4_request",
+ }, "/")
+
+ if v4.isPresign {
+ v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString)
+ }
+}
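+
+// For example (illustrative values), a request signed on 2016-06-06 for S3
+// in us-east-1 produces the credential scope:
+//
+//    20160606/us-east-1/s3/aws4_request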
+
+func buildQuery(r rule, header http.Header) (url.Values, http.Header) {
+ query := url.Values{}
+ unsignedHeaders := http.Header{}
+ for k, h := range header {
+ if r.IsValid(k) {
+ query[k] = h
+ } else {
+ unsignedHeaders[k] = h
+ }
+ }
+
+ return query, unsignedHeaders
+}
+
+func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) {
+ var headers []string
+ headers = append(headers, "host")
+ for k, v := range header {
+ canonicalKey := http.CanonicalHeaderKey(k)
+ if !r.IsValid(canonicalKey) {
+ continue // ignored header
+ }
+ if v4.signedHeaderVals == nil {
+ v4.signedHeaderVals = make(http.Header)
+ }
+
+ lowerCaseKey := strings.ToLower(k)
+ if _, ok := v4.signedHeaderVals[lowerCaseKey]; ok {
+ // include additional values
+ v4.signedHeaderVals[lowerCaseKey] = append(v4.signedHeaderVals[lowerCaseKey], v...)
+ continue
+ }
+
+ headers = append(headers, lowerCaseKey)
+ v4.signedHeaderVals[lowerCaseKey] = v
+ }
+ sort.Strings(headers)
+
+ v4.signedHeaders = strings.Join(headers, ";")
+
+ if v4.isPresign {
+ v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders)
+ }
+
+ headerValues := make([]string, len(headers))
+ for i, k := range headers {
+ if k == "host" {
+ headerValues[i] = "host:" + v4.Request.URL.Host
+ } else {
+ headerValues[i] = k + ":" +
+ strings.Join(v4.signedHeaderVals[k], ",")
+ }
+ }
+
+ v4.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
+}
+
+func (v4 *signer) buildCanonicalString() {
+ v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1)
+ uri := v4.Request.URL.Opaque
+ if uri != "" {
+ uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
+ } else {
+ uri = v4.Request.URL.Path
+ }
+ if uri == "" {
+ uri = "/"
+ }
+
+ if v4.ServiceName != "s3" {
+ uri = rest.EscapePath(uri, false)
+ }
+
+ v4.canonicalString = strings.Join([]string{
+ v4.Request.Method,
+ uri,
+ v4.Request.URL.RawQuery,
+ v4.canonicalHeaders + "\n",
+ v4.signedHeaders,
+ v4.bodyDigest(),
+ }, "\n")
+}
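+
+// The canonical string built above follows the SigV4 canonical request
+// layout, one component per line, with a blank line separating the
+// canonical headers from the signed-headers list (illustrative):
+//
+//    GET
+//    /bucket/key
+//    X-Amz-Date=20160606T144438Z&...
+//    host:s3.amazonaws.com
+//
+//    host
+//    <hex sha256 of the payload>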
+
+func (v4 *signer) buildStringToSign() {
+ v4.stringToSign = strings.Join([]string{
+ authHeaderPrefix,
+ v4.formattedTime,
+ v4.credentialString,
+ hex.EncodeToString(makeSha256([]byte(v4.canonicalString))),
+ }, "\n")
+}
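+
+// The resulting string to sign has a fixed four-line layout (illustrative):
+//
+//    AWS4-HMAC-SHA256
+//    20160606T144438Z
+//    20160606/us-east-1/s3/aws4_request
+//    <hex sha256 of the canonical string>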
+
+func (v4 *signer) buildSignature() {
+ secret := v4.CredValues.SecretAccessKey
+ date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime))
+ region := makeHmac(date, []byte(v4.Region))
+ service := makeHmac(region, []byte(v4.ServiceName))
+ credentials := makeHmac(service, []byte("aws4_request"))
+ signature := makeHmac(credentials, []byte(v4.stringToSign))
+ v4.signature = hex.EncodeToString(signature)
+}
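+
+// buildSignature implements the standard SigV4 HMAC-SHA256 key derivation
+// chain (shown schematically):
+//
+//    kDate     = HMAC("AWS4" + secret, shortDate)
+//    kRegion   = HMAC(kDate, region)
+//    kService  = HMAC(kRegion, service)
+//    kSigning  = HMAC(kService, "aws4_request")
+//    signature = hex(HMAC(kSigning, stringToSign))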
+
+func (v4 *signer) bodyDigest() string {
+ hash := v4.Request.Header.Get("X-Amz-Content-Sha256")
+ if hash == "" {
+ if v4.isPresign && v4.ServiceName == "s3" {
+ hash = "UNSIGNED-PAYLOAD"
+ } else if v4.Body == nil {
+ hash = hex.EncodeToString(makeSha256([]byte{}))
+ } else {
+ hash = hex.EncodeToString(makeSha256Reader(v4.Body))
+ }
+ v4.Request.Header.Add("X-Amz-Content-Sha256", hash)
+ }
+ return hash
+}
+
+// isRequestSigned returns whether the request is currently signed or presigned
+func (v4 *signer) isRequestSigned() bool {
+ if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" {
+ return true
+ }
+ if v4.Request.Header.Get("Authorization") != "" {
+ return true
+ }
+
+ return false
+}
+
+// removePresign removes the presigning parameters from the request's query string.
+func (v4 *signer) removePresign() {
+ v4.Query.Del("X-Amz-Algorithm")
+ v4.Query.Del("X-Amz-Signature")
+ v4.Query.Del("X-Amz-Security-Token")
+ v4.Query.Del("X-Amz-Date")
+ v4.Query.Del("X-Amz-Expires")
+ v4.Query.Del("X-Amz-Credential")
+ v4.Query.Del("X-Amz-SignedHeaders")
+}
+
+func makeHmac(key []byte, data []byte) []byte {
+ hash := hmac.New(sha256.New, key)
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+func makeSha256(data []byte) []byte {
+ hash := sha256.New()
+ hash.Write(data)
+ return hash.Sum(nil)
+}
+
+func makeSha256Reader(reader io.ReadSeeker) []byte {
+ hash := sha256.New()
+ start, _ := reader.Seek(0, 1)
+ defer reader.Seek(start, 0)
+
+ io.Copy(hash, reader)
+ return hash.Sum(nil)
+}
+
+func stripExcessSpaces(headerVals []string) []string {
+ vals := make([]string, len(headerVals))
+ for i, str := range headerVals {
+ stripped := ""
+ found := false
+ str = strings.TrimSpace(str)
+ for _, c := range str {
+ if !found && c == ' ' {
+ stripped += string(c)
+ found = true
+ } else if c != ' ' {
+ stripped += string(c)
+ found = false
+ }
+ }
+ vals[i] = stripped
+ }
+ return vals
+}
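+
+// For example (illustrative), stripExcessSpaces collapses interior runs of
+// spaces to a single space and trims leading/trailing whitespace:
+//
+//    stripExcessSpaces([]string{"  a   b  "}) // -> []string{"a b"}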
diff --git a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go
new file mode 100644
index 0000000..b51e944
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go
@@ -0,0 +1,134 @@
+package waiter
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// A Config provides a collection of configuration values to setup a generated
+// waiter code with.
+type Config struct {
+ Name string
+ Delay int
+ MaxAttempts int
+ Operation string
+ Acceptors []WaitAcceptor
+}
+
+// A WaitAcceptor provides the information needed to wait for an API operation
+// to complete.
+type WaitAcceptor struct {
+ Expected interface{}
+ Matcher string
+ State string
+ Argument string
+}
+
+// A Waiter provides waiting for an operation to complete.
+type Waiter struct {
+ Config
+ Client interface{}
+ Input interface{}
+}
+
+// Wait waits until an operation completes, the maximum number of attempts
+// is exceeded, or the operation fails. An error is returned if the operation
+// fails.
+func (w *Waiter) Wait() error {
+ client := reflect.ValueOf(w.Client)
+ in := reflect.ValueOf(w.Input)
+ method := client.MethodByName(w.Config.Operation + "Request")
+
+ for i := 0; i < w.MaxAttempts; i++ {
+ res := method.Call([]reflect.Value{in})
+ req := res[0].Interface().(*request.Request)
+ req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter"))
+
+ err := req.Send()
+ for _, a := range w.Acceptors {
+ result := false
+ var vals []interface{}
+ switch a.Matcher {
+ case "pathAll", "path":
+ // Require all matched values to equal the expected value for a match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ if len(vals) == 0 {
+ break
+ }
+ result = true
+ for _, val := range vals {
+ if !awsutil.DeepEqual(val, a.Expected) {
+ result = false
+ break
+ }
+ }
+ case "pathAny":
+ // Only a single matched value needs to equal the expected value
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ for _, val := range vals {
+ if awsutil.DeepEqual(val, a.Expected) {
+ result = true
+ break
+ }
+ }
+ case "status":
+ s := a.Expected.(int)
+ result = s == req.HTTPResponse.StatusCode
+ case "error":
+ if aerr, ok := err.(awserr.Error); ok {
+ result = aerr.Code() == a.Expected.(string)
+ }
+ case "pathList":
+ // ignored matcher
+ default:
+ logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s",
+ w.Config.Operation, a.Matcher)
+ }
+
+ if !result {
+ // This acceptor did not match the response; move on to the
+ // next acceptor.
+ continue
+ }
+
+ switch a.State {
+ case "success":
+ // waiter completed
+ return nil
+ case "failure":
+ // Waiter failure state triggered
+ return awserr.New("ResourceNotReady",
+ fmt.Sprintf("failed waiting for successful resource state"), err)
+ case "retry":
+ // clear the error and retry the operation
+ err = nil
+ default:
+ logf(client, "WARNING: Waiter for %s encountered unexpected state: %s",
+ w.Config.Operation, a.State)
+ }
+ }
+ if err != nil {
+ return err
+ }
+
+ time.Sleep(time.Second * time.Duration(w.Delay))
+ }
+
+ return awserr.New("ResourceNotReady",
+ fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil)
+}
+
+func logf(client reflect.Value, msg string, args ...interface{}) {
+ cfgVal := client.FieldByName("Config")
+ if !cfgVal.IsValid() {
+ return
+ }
+ if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil {
+ cfg.Logger.Log(fmt.Sprintf(msg, args...))
+ }
+}
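+
+// Illustrative sketch (the operation, delay, and acceptor values here are
+// hypothetical): a generated waiter fills in Config and calls Wait, e.g.
+// polling HeadBucket every Delay seconds and succeeding on a 200 status:
+//
+//    w := waiter.Waiter{
+//        Client: svc,
+//        Input:  input,
+//        Config: waiter.Config{
+//            Operation:   "HeadBucket",
+//            Delay:       5,  // seconds between attempts
+//            MaxAttempts: 20,
+//            Acceptors: []waiter.WaitAcceptor{
+//                {State: "success", Matcher: "status", Expected: 200},
+//            },
+//        },
+//    }
+//    err := w.Wait()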
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
new file mode 100644
index 0000000..f1f6086
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -0,0 +1,8187 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+// Package s3 provides a client for Amazon Simple Storage Service.
+package s3
+
+import (
+ "fmt"
+ "io"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol"
+ "github.com/aws/aws-sdk-go/private/protocol/restxml"
+)
+
+const opAbortMultipartUpload = "AbortMultipartUpload"
+
+// AbortMultipartUploadRequest generates a request for the AbortMultipartUpload operation.
+func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) {
+ op := &request.Operation{
+ Name: opAbortMultipartUpload,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &AbortMultipartUploadInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &AbortMultipartUploadOutput{}
+ req.Data = output
+ return
+}
+
+// Aborts a multipart upload.
+//
+// To verify that all parts have been removed, so you don't get charged for
+// the part storage, you should call the List Parts operation and ensure the
+// parts list is empty.
+func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) {
+ req, out := c.AbortMultipartUploadRequest(input)
+ err := req.Send()
+ return out, err
+}
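+
+// Illustrative sketch (myDebugHandler is hypothetical): every operation in
+// this package follows the same two-step pattern, so callers that need to
+// customize a request before it is sent can build it first and send it
+// separately:
+//
+//    req, out := svc.AbortMultipartUploadRequest(input)
+//    req.Handlers.Send.PushFront(myDebugHandler)
+//    if err := req.Send(); err == nil {
+//        fmt.Println(out)
+//    }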
+
+const opCompleteMultipartUpload = "CompleteMultipartUpload"
+
+// CompleteMultipartUploadRequest generates a request for the CompleteMultipartUpload operation.
+func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) {
+ op := &request.Operation{
+ Name: opCompleteMultipartUpload,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &CompleteMultipartUploadInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CompleteMultipartUploadOutput{}
+ req.Data = output
+ return
+}
+
+// Completes a multipart upload by assembling previously uploaded parts.
+func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) {
+ req, out := c.CompleteMultipartUploadRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCopyObject = "CopyObject"
+
+// CopyObjectRequest generates a request for the CopyObject operation.
+func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) {
+ op := &request.Operation{
+ Name: opCopyObject,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &CopyObjectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CopyObjectOutput{}
+ req.Data = output
+ return
+}
+
+// Creates a copy of an object that is already stored in Amazon S3.
+func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) {
+ req, out := c.CopyObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateBucket = "CreateBucket"
+
+// CreateBucketRequest generates a request for the CreateBucket operation.
+func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) {
+ op := &request.Operation{
+ Name: opCreateBucket,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &CreateBucketInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CreateBucketOutput{}
+ req.Data = output
+ return
+}
+
+// Creates a new bucket.
+func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
+ req, out := c.CreateBucketRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opCreateMultipartUpload = "CreateMultipartUpload"
+
+// CreateMultipartUploadRequest generates a request for the CreateMultipartUpload operation.
+func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) {
+ op := &request.Operation{
+ Name: opCreateMultipartUpload,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?uploads",
+ }
+
+ if input == nil {
+ input = &CreateMultipartUploadInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &CreateMultipartUploadOutput{}
+ req.Data = output
+ return
+}
+
+// Initiates a multipart upload and returns an upload ID.
+//
+// Note: After you initiate a multipart upload and upload one or more parts,
+// you must either complete or abort the multipart upload in order to stop
+// getting charged for storage of the uploaded parts. Only after you either
+// complete or abort the multipart upload will Amazon S3 free up the parts
+// storage and stop charging you for it.
+func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) {
+ req, out := c.CreateMultipartUploadRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteBucket = "DeleteBucket"
+
+// DeleteBucketRequest generates a request for the DeleteBucket operation.
+func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucket,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &DeleteBucketInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteBucketOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the bucket. All objects (including all object versions and Delete
+// Markers) in the bucket must be deleted before the bucket itself can be deleted.
+func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) {
+ req, out := c.DeleteBucketRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteBucketCors = "DeleteBucketCors"
+
+// DeleteBucketCorsRequest generates a request for the DeleteBucketCors operation.
+func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketCors,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &DeleteBucketCorsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteBucketCorsOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the cors configuration information set for the bucket.
+func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) {
+ req, out := c.DeleteBucketCorsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
+
+// DeleteBucketLifecycleRequest generates a request for the DeleteBucketLifecycle operation.
+func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketLifecycle,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &DeleteBucketLifecycleInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteBucketLifecycleOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the lifecycle configuration from the bucket.
+func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) {
+ req, out := c.DeleteBucketLifecycleRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteBucketPolicy = "DeleteBucketPolicy"
+
+// DeleteBucketPolicyRequest generates a request for the DeleteBucketPolicy operation.
+func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketPolicy,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?policy",
+ }
+
+ if input == nil {
+ input = &DeleteBucketPolicyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteBucketPolicyOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the policy from the bucket.
+func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
+ req, out := c.DeleteBucketPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteBucketReplication = "DeleteBucketReplication"
+
+// DeleteBucketReplicationRequest generates a request for the DeleteBucketReplication operation.
+func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketReplication,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &DeleteBucketReplicationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteBucketReplicationOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the replication configuration from the bucket.
+func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
+ req, out := c.DeleteBucketReplicationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteBucketTagging = "DeleteBucketTagging"
+
+// DeleteBucketTaggingRequest generates a request for the DeleteBucketTagging operation.
+func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketTagging,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?tagging",
+ }
+
+ if input == nil {
+ input = &DeleteBucketTaggingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteBucketTaggingOutput{}
+ req.Data = output
+ return
+}
+
+// Deletes the tags from the bucket.
+func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) {
+ req, out := c.DeleteBucketTaggingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteBucketWebsite = "DeleteBucketWebsite"
+
+// DeleteBucketWebsiteRequest generates a request for the DeleteBucketWebsite operation.
+func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) {
+ op := &request.Operation{
+ Name: opDeleteBucketWebsite,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &DeleteBucketWebsiteInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &DeleteBucketWebsiteOutput{}
+ req.Data = output
+ return
+}
+
+// This operation removes the website configuration from the bucket.
+func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) {
+ req, out := c.DeleteBucketWebsiteRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteObject = "DeleteObject"
+
+// DeleteObjectRequest generates a request for the DeleteObject operation.
+func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) {
+ op := &request.Operation{
+ Name: opDeleteObject,
+ HTTPMethod: "DELETE",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &DeleteObjectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteObjectOutput{}
+ req.Data = output
+ return
+}
+
+// Removes the null version (if there is one) of an object and inserts a delete
+// marker, which becomes the latest version of the object. If there isn't a
+// null version, Amazon S3 does not remove any objects.
+func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
+ req, out := c.DeleteObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opDeleteObjects = "DeleteObjects"
+
+// DeleteObjectsRequest generates a request for the DeleteObjects operation.
+func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) {
+ op := &request.Operation{
+ Name: opDeleteObjects,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}?delete",
+ }
+
+ if input == nil {
+ input = &DeleteObjectsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &DeleteObjectsOutput{}
+ req.Data = output
+ return
+}
+
+// This operation enables you to delete multiple objects from a bucket using
+// a single HTTP request. You may specify up to 1000 keys.
+func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) {
+ req, out := c.DeleteObjectsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"
+
+// GetBucketAccelerateConfigurationRequest generates a request for the GetBucketAccelerateConfiguration operation.
+func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketAccelerateConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?accelerate",
+ }
+
+ if input == nil {
+ input = &GetBucketAccelerateConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketAccelerateConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the accelerate configuration of a bucket.
+func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) {
+ req, out := c.GetBucketAccelerateConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketAcl = "GetBucketAcl"
+
+// GetBucketAclRequest generates a request for the GetBucketAcl operation.
+func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) {
+ op := &request.Operation{
+ Name: opGetBucketAcl,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?acl",
+ }
+
+ if input == nil {
+ input = &GetBucketAclInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketAclOutput{}
+ req.Data = output
+ return
+}
+
+// Gets the access control policy for the bucket.
+func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) {
+ req, out := c.GetBucketAclRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketCors = "GetBucketCors"
+
+// GetBucketCorsRequest generates a request for the GetBucketCors operation.
+func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) {
+ op := &request.Operation{
+ Name: opGetBucketCors,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &GetBucketCorsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketCorsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the cors configuration for the bucket.
+func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) {
+ req, out := c.GetBucketCorsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketLifecycle = "GetBucketLifecycle"
+
+// GetBucketLifecycleRequest generates a request for the GetBucketLifecycle operation.
+func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) {
+ if c.Client.Config.Logger != nil {
+ c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated")
+ }
+ op := &request.Operation{
+ Name: opGetBucketLifecycle,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &GetBucketLifecycleInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketLifecycleOutput{}
+ req.Data = output
+ return
+}
+
+// Deprecated, see the GetBucketLifecycleConfiguration operation.
+func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) {
+ req, out := c.GetBucketLifecycleRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration"
+
+// GetBucketLifecycleConfigurationRequest generates a request for the GetBucketLifecycleConfiguration operation.
+func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketLifecycleConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &GetBucketLifecycleConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketLifecycleConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the lifecycle configuration information set on the bucket.
+func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) {
+ req, out := c.GetBucketLifecycleConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketLocation = "GetBucketLocation"
+
+// GetBucketLocationRequest generates a request for the GetBucketLocation operation.
+func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketLocation,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?location",
+ }
+
+ if input == nil {
+ input = &GetBucketLocationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketLocationOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the region the bucket resides in.
+func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) {
+ req, out := c.GetBucketLocationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketLogging = "GetBucketLogging"
+
+// GetBucketLoggingRequest generates a request for the GetBucketLogging operation.
+func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) {
+ op := &request.Operation{
+ Name: opGetBucketLogging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?logging",
+ }
+
+ if input == nil {
+ input = &GetBucketLoggingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketLoggingOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the logging status of a bucket and the permissions users have to
+// view and modify that status. To use GET, you must be the bucket owner.
+func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) {
+ req, out := c.GetBucketLoggingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketNotification = "GetBucketNotification"
+
+// GetBucketNotificationRequest generates a request for the GetBucketNotification operation.
+func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) {
+ if c.Client.Config.Logger != nil {
+ c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated")
+ }
+ op := &request.Operation{
+ Name: opGetBucketNotification,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &GetBucketNotificationConfigurationRequest{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &NotificationConfigurationDeprecated{}
+ req.Data = output
+ return
+}
+
+// Deprecated, see the GetBucketNotificationConfiguration operation.
+func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) {
+ req, out := c.GetBucketNotificationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration"
+
+// GetBucketNotificationConfigurationRequest generates a request for the GetBucketNotificationConfiguration operation.
+func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) {
+ op := &request.Operation{
+ Name: opGetBucketNotificationConfiguration,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &GetBucketNotificationConfigurationRequest{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &NotificationConfiguration{}
+ req.Data = output
+ return
+}
+
+// Returns the notification configuration of a bucket.
+func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) {
+ req, out := c.GetBucketNotificationConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketPolicy = "GetBucketPolicy"
+
+// GetBucketPolicyRequest generates a request for the GetBucketPolicy operation.
+func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) {
+ op := &request.Operation{
+ Name: opGetBucketPolicy,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?policy",
+ }
+
+ if input == nil {
+ input = &GetBucketPolicyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketPolicyOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the policy of a specified bucket.
+func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) {
+ req, out := c.GetBucketPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketReplication = "GetBucketReplication"
+
+// GetBucketReplicationRequest generates a request for the GetBucketReplication operation.
+func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) {
+ op := &request.Operation{
+ Name: opGetBucketReplication,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &GetBucketReplicationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketReplicationOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the replication configuration of a bucket.
+func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
+ req, out := c.GetBucketReplicationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketRequestPayment = "GetBucketRequestPayment"
+
+// GetBucketRequestPaymentRequest generates a request for the GetBucketRequestPayment operation.
+func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) {
+ op := &request.Operation{
+ Name: opGetBucketRequestPayment,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?requestPayment",
+ }
+
+ if input == nil {
+ input = &GetBucketRequestPaymentInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketRequestPaymentOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the request payment configuration of a bucket.
+func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) {
+ req, out := c.GetBucketRequestPaymentRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketTagging = "GetBucketTagging"
+
+// GetBucketTaggingRequest generates a request for the GetBucketTagging operation.
+func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) {
+ op := &request.Operation{
+ Name: opGetBucketTagging,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?tagging",
+ }
+
+ if input == nil {
+ input = &GetBucketTaggingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketTaggingOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the tag set associated with the bucket.
+func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) {
+ req, out := c.GetBucketTaggingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketVersioning = "GetBucketVersioning"
+
+// GetBucketVersioningRequest generates a request for the GetBucketVersioning operation.
+func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) {
+ op := &request.Operation{
+ Name: opGetBucketVersioning,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?versioning",
+ }
+
+ if input == nil {
+ input = &GetBucketVersioningInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketVersioningOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the versioning state of a bucket.
+func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) {
+ req, out := c.GetBucketVersioningRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetBucketWebsite = "GetBucketWebsite"
+
+// GetBucketWebsiteRequest generates a request for the GetBucketWebsite operation.
+func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) {
+ op := &request.Operation{
+ Name: opGetBucketWebsite,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &GetBucketWebsiteInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetBucketWebsiteOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the website configuration for a bucket.
+func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) {
+ req, out := c.GetBucketWebsiteRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetObject = "GetObject"
+
+// GetObjectRequest generates a request for the GetObject operation.
+func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) {
+ op := &request.Operation{
+ Name: opGetObject,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &GetObjectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetObjectOutput{}
+ req.Data = output
+ return
+}
+
+// Retrieves objects from Amazon S3.
+func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) {
+ req, out := c.GetObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
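+
+// Illustrative sketch (assumes the aws, s3, and time packages are imported;
+// the bucket and key are hypothetical): because GetObjectRequest returns the
+// unsent request, it can also be presigned instead of sent:
+//
+//    req, _ := svc.GetObjectRequest(&s3.GetObjectInput{
+//        Bucket: aws.String("my-bucket"),
+//        Key:    aws.String("my-key"),
+//    })
+//    url, err := req.Presign(15 * time.Minute)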
+
+const opGetObjectAcl = "GetObjectAcl"
+
+// GetObjectAclRequest generates a request for the GetObjectAcl operation.
+func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) {
+ op := &request.Operation{
+ Name: opGetObjectAcl,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?acl",
+ }
+
+ if input == nil {
+ input = &GetObjectAclInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetObjectAclOutput{}
+ req.Data = output
+ return
+}
+
+// Returns the access control list (ACL) of an object.
+func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) {
+ req, out := c.GetObjectAclRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opGetObjectTorrent = "GetObjectTorrent"
+
+// GetObjectTorrentRequest generates a request for the GetObjectTorrent operation.
+func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) {
+ op := &request.Operation{
+ Name: opGetObjectTorrent,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}?torrent",
+ }
+
+ if input == nil {
+ input = &GetObjectTorrentInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &GetObjectTorrentOutput{}
+ req.Data = output
+ return
+}
+
+// Returns torrent files from a bucket.
+func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) {
+ req, out := c.GetObjectTorrentRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opHeadBucket = "HeadBucket"
+
+// HeadBucketRequest generates a request for the HeadBucket operation.
+func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) {
+ op := &request.Operation{
+ Name: opHeadBucket,
+ HTTPMethod: "HEAD",
+ HTTPPath: "/{Bucket}",
+ }
+
+ if input == nil {
+ input = &HeadBucketInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &HeadBucketOutput{}
+ req.Data = output
+ return
+}
+
+// This operation is useful for determining whether a bucket exists and whether
+// you have permission to access it.
+func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) {
+ req, out := c.HeadBucketRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opHeadObject = "HeadObject"
+
+// HeadObjectRequest generates a request for the HeadObject operation.
+func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) {
+ op := &request.Operation{
+ Name: opHeadObject,
+ HTTPMethod: "HEAD",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &HeadObjectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &HeadObjectOutput{}
+ req.Data = output
+ return
+}
+
+// The HEAD operation retrieves metadata from an object without returning the
+// object itself. This operation is useful if you're only interested in an object's
+// metadata. To use HEAD, you must have READ access to the object.
+func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) {
+ req, out := c.HeadObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListBuckets = "ListBuckets"
+
+// ListBucketsRequest generates a request for the ListBuckets operation.
+func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) {
+ op := &request.Operation{
+ Name: opListBuckets,
+ HTTPMethod: "GET",
+ HTTPPath: "/",
+ }
+
+ if input == nil {
+ input = &ListBucketsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListBucketsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns a list of all buckets owned by the authenticated sender of the request.
+func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) {
+ req, out := c.ListBucketsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListMultipartUploads = "ListMultipartUploads"
+
+// ListMultipartUploadsRequest generates a request for the ListMultipartUploads operation.
+func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) {
+ op := &request.Operation{
+ Name: opListMultipartUploads,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?uploads",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"KeyMarker", "UploadIdMarker"},
+ OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"},
+ LimitToken: "MaxUploads",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListMultipartUploadsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListMultipartUploadsOutput{}
+ req.Data = output
+ return
+}
+
+// This operation lists in-progress multipart uploads.
+func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) {
+ req, out := c.ListMultipartUploadsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
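+// ListMultipartUploadsPages iterates over the pages of the ListMultipartUploads
+// operation, calling fn with each page. Iteration stops when fn returns false
+// or the last page has been visited.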
+func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListMultipartUploadsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListMultipartUploadsOutput), lastPage)
+ })
+}
+
+const opListObjectVersions = "ListObjectVersions"
+
+// ListObjectVersionsRequest generates a request for the ListObjectVersions operation.
+func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) {
+ op := &request.Operation{
+ Name: opListObjectVersions,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?versions",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"KeyMarker", "VersionIdMarker"},
+ OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"},
+ LimitToken: "MaxKeys",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListObjectVersionsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListObjectVersionsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns metadata about all of the versions of objects in a bucket.
+func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) {
+ req, out := c.ListObjectVersionsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
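+// ListObjectVersionsPages iterates over the pages of the ListObjectVersions
+// operation, calling fn with each page. Iteration stops when fn returns false
+// or the last page has been visited.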
+func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListObjectVersionsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListObjectVersionsOutput), lastPage)
+ })
+}
+
+const opListObjects = "ListObjects"
+
+// ListObjectsRequest generates a request for the ListObjects operation.
+func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) {
+ op := &request.Operation{
+ Name: opListObjects,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"Marker"},
+ OutputTokens: []string{"NextMarker || Contents[-1].Key"},
+ LimitToken: "MaxKeys",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListObjectsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListObjectsOutput{}
+ req.Data = output
+ return
+}
+
+// Returns some or all (up to 1000) of the objects in a bucket. You can use
+// the request parameters as selection criteria to return a subset of the objects
+// in a bucket.
+func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
+ req, out := c.ListObjectsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
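+// ListObjectsPages iterates over the pages of the ListObjects operation,
+// calling fn with each page. Iteration stops when fn returns false or the
+// last page has been visited.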
+func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListObjectsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListObjectsOutput), lastPage)
+ })
+}
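+
+// Illustrative sketch (assumes the aws and fmt packages are imported; the
+// bucket name is hypothetical): the Pages variant walks every page until the
+// callback returns false or the last page is reached:
+//
+//    err := svc.ListObjectsPages(&s3.ListObjectsInput{Bucket: aws.String("my-bucket")},
+//        func(p *s3.ListObjectsOutput, lastPage bool) bool {
+//            fmt.Println(len(p.Contents), "objects on this page")
+//            return true // keep paging
+//        })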
+
+const opListObjectsV2 = "ListObjectsV2"
+
+// ListObjectsV2Request generates a request for the ListObjectsV2 operation.
+func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) {
+ op := &request.Operation{
+ Name: opListObjectsV2,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}?list-type=2",
+ }
+
+ if input == nil {
+ input = &ListObjectsV2Input{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListObjectsV2Output{}
+ req.Data = output
+ return
+}
+
+// Returns some or all (up to 1000) of the objects in a bucket. You can use
+// the request parameters as selection criteria to return a subset of the objects
+// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend
+// you use this revised API for new application development.
+func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) {
+ req, out := c.ListObjectsV2Request(input)
+ err := req.Send()
+ return out, err
+}
+
+const opListParts = "ListParts"
+
+// ListPartsRequest generates a request for the ListParts operation.
+func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) {
+ op := &request.Operation{
+ Name: opListParts,
+ HTTPMethod: "GET",
+ HTTPPath: "/{Bucket}/{Key+}",
+ Paginator: &request.Paginator{
+ InputTokens: []string{"PartNumberMarker"},
+ OutputTokens: []string{"NextPartNumberMarker"},
+ LimitToken: "MaxParts",
+ TruncationToken: "IsTruncated",
+ },
+ }
+
+ if input == nil {
+ input = &ListPartsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &ListPartsOutput{}
+ req.Data = output
+ return
+}
+
+// Lists the parts that have been uploaded for a specific multipart upload.
+func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
+ req, out := c.ListPartsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
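+// ListPartsPages iterates over the pages of the ListParts operation, calling
+// fn with each page. Iteration stops when fn returns false or the last page
+// has been visited.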
+func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error {
+ page, _ := c.ListPartsRequest(input)
+ page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
+ return page.EachPage(func(p interface{}, lastPage bool) bool {
+ return fn(p.(*ListPartsOutput), lastPage)
+ })
+}
+
+const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"
+
+// PutBucketAccelerateConfigurationRequest generates a request for the PutBucketAccelerateConfiguration operation.
+func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketAccelerateConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?accelerate",
+ }
+
+ if input == nil {
+ input = &PutBucketAccelerateConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketAccelerateConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the accelerate configuration of an existing bucket.
+func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) {
+ req, out := c.PutBucketAccelerateConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketAcl = "PutBucketAcl"
+
+// PutBucketAclRequest generates a request for the PutBucketAcl operation.
+func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) {
+ op := &request.Operation{
+ Name: opPutBucketAcl,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?acl",
+ }
+
+ if input == nil {
+ input = &PutBucketAclInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketAclOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the permissions on a bucket using access control lists (ACL).
+func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
+ req, out := c.PutBucketAclRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketCors = "PutBucketCors"
+
+// PutBucketCorsRequest generates a request for the PutBucketCors operation.
+func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) {
+ op := &request.Operation{
+ Name: opPutBucketCors,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?cors",
+ }
+
+ if input == nil {
+ input = &PutBucketCorsInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketCorsOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the cors configuration for a bucket.
+func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
+ req, out := c.PutBucketCorsRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketLifecycle = "PutBucketLifecycle"
+
+// PutBucketLifecycleRequest generates a request for the PutBucketLifecycle operation.
+func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) {
+ if c.Client.Config.Logger != nil {
+ c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated")
+ }
+ op := &request.Operation{
+ Name: opPutBucketLifecycle,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &PutBucketLifecycleInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketLifecycleOutput{}
+ req.Data = output
+ return
+}
+
+// Deprecated, see the PutBucketLifecycleConfiguration operation.
+func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
+ req, out := c.PutBucketLifecycleRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
+
+// PutBucketLifecycleConfigurationRequest generates a request for the PutBucketLifecycleConfiguration operation.
+func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketLifecycleConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?lifecycle",
+ }
+
+ if input == nil {
+ input = &PutBucketLifecycleConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketLifecycleConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// Sets lifecycle configuration for your bucket. If a lifecycle configuration
+// already exists, this operation replaces it.
+func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
+ req, out := c.PutBucketLifecycleConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
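+// Illustrative sketch, not part of the generated API: replacing a bucket's
+// lifecycle configuration with one rule that expires objects under a prefix
+// after 30 days. The bucket name and prefix are hypothetical; LifecycleRule
+// and LifecycleExpiration are defined later in this file, and the aws helper
+// package is assumed to be imported.
+func examplePutBucketLifecycleConfiguration(svc *S3) error {
+ _, err := svc.PutBucketLifecycleConfiguration(&PutBucketLifecycleConfigurationInput{
+ Bucket: aws.String("my-bucket"),
+ LifecycleConfiguration: &BucketLifecycleConfiguration{
+ Rules: []*LifecycleRule{{
+ ID: aws.String("expire-logs"),
+ Prefix: aws.String("logs/"),
+ Status: aws.String("Enabled"),
+ Expiration: &LifecycleExpiration{Days: aws.Int64(30)},
+ }},
+ },
+ })
+ return err
+}
+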
+const opPutBucketLogging = "PutBucketLogging"
+
+// PutBucketLoggingRequest generates a request for the PutBucketLogging operation.
+func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) {
+ op := &request.Operation{
+ Name: opPutBucketLogging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?logging",
+ }
+
+ if input == nil {
+ input = &PutBucketLoggingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketLoggingOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the logging parameters for a bucket and specifies permissions for who
+// can view and modify the logging parameters. To set the logging status of
+// a bucket, you must be the bucket owner.
+func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
+ req, out := c.PutBucketLoggingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
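+// Illustrative sketch, not part of the generated API: enabling server access
+// logging, delivering logs to a target bucket under a prefix. The target
+// bucket must already grant the log delivery group write permission. Bucket
+// names are hypothetical; LoggingEnabled is defined later in this file and
+// the aws helper package is assumed to be imported.
+func examplePutBucketLogging(svc *S3) error {
+ _, err := svc.PutBucketLogging(&PutBucketLoggingInput{
+ Bucket: aws.String("my-bucket"),
+ BucketLoggingStatus: &BucketLoggingStatus{
+ LoggingEnabled: &LoggingEnabled{
+ TargetBucket: aws.String("my-log-bucket"),
+ TargetPrefix: aws.String("access-logs/"),
+ },
+ },
+ })
+ return err
+}
+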
+const opPutBucketNotification = "PutBucketNotification"
+
+// PutBucketNotificationRequest generates a request for the PutBucketNotification operation.
+func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) {
+ if c.Client.Config.Logger != nil {
+ c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated")
+ }
+ op := &request.Operation{
+ Name: opPutBucketNotification,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &PutBucketNotificationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketNotificationOutput{}
+ req.Data = output
+ return
+}
+
+// Deprecated, see the PutBucketNotificationConfiguration operation.
+func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
+ req, out := c.PutBucketNotificationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration"
+
+// PutBucketNotificationConfigurationRequest generates a request for the PutBucketNotificationConfiguration operation.
+func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketNotificationConfiguration,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?notification",
+ }
+
+ if input == nil {
+ input = &PutBucketNotificationConfigurationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketNotificationConfigurationOutput{}
+ req.Data = output
+ return
+}
+
+// Enables notifications of specified events for a bucket.
+func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
+ req, out := c.PutBucketNotificationConfigurationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketPolicy = "PutBucketPolicy"
+
+// PutBucketPolicyRequest generates a request for the PutBucketPolicy operation.
+func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) {
+ op := &request.Operation{
+ Name: opPutBucketPolicy,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?policy",
+ }
+
+ if input == nil {
+ input = &PutBucketPolicyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketPolicyOutput{}
+ req.Data = output
+ return
+}
+
+// Replaces a policy on a bucket. If the bucket already has a policy, the one
+// in this request completely replaces it.
+func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
+ req, out := c.PutBucketPolicyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
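+// Illustrative sketch, not part of the generated API: attaching a bucket
+// policy. The Policy field takes the policy document as a JSON string; the
+// statement below (public read of a hypothetical bucket) is only an example.
+// The aws helper package is assumed to be imported.
+func examplePutBucketPolicy(svc *S3) error {
+ policy := `{
+ "Version": "2012-10-17",
+ "Statement": [{
+ "Effect": "Allow",
+ "Principal": "*",
+ "Action": "s3:GetObject",
+ "Resource": "arn:aws:s3:::my-bucket/*"
+ }]
+ }`
+ _, err := svc.PutBucketPolicy(&PutBucketPolicyInput{
+ Bucket: aws.String("my-bucket"),
+ Policy: aws.String(policy),
+ })
+ return err
+}
+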
+const opPutBucketReplication = "PutBucketReplication"
+
+// PutBucketReplicationRequest generates a request for the PutBucketReplication operation.
+func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) {
+ op := &request.Operation{
+ Name: opPutBucketReplication,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?replication",
+ }
+
+ if input == nil {
+ input = &PutBucketReplicationInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketReplicationOutput{}
+ req.Data = output
+ return
+}
+
+// Creates a new replication configuration (or replaces an existing one, if
+// present).
+func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
+ req, out := c.PutBucketReplicationRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutBucketRequestPayment = "PutBucketRequestPayment"
+
+// PutBucketRequestPaymentRequest generates a request for the PutBucketRequestPayment operation.
+func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) {
+ op := &request.Operation{
+ Name: opPutBucketRequestPayment,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?requestPayment",
+ }
+
+ if input == nil {
+ input = &PutBucketRequestPaymentInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketRequestPaymentOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the request payment configuration for a bucket. By default, the bucket
+// owner pays for downloads from the bucket. This configuration parameter enables
+// the bucket owner (only) to specify that the person requesting the download
+// will be charged for the download. Documentation on requester pays buckets
+// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html
+func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) {
+ req, out := c.PutBucketRequestPaymentRequest(input)
+ err := req.Send()
+ return out, err
+}
+
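+// Illustrative sketch, not part of the generated API: switching a
+// hypothetical bucket to requester-pays. Payer accepts "Requester" or
+// "BucketOwner"; RequestPaymentConfiguration is defined later in this file
+// and the aws helper package is assumed to be imported.
+func examplePutBucketRequestPayment(svc *S3) error {
+ _, err := svc.PutBucketRequestPayment(&PutBucketRequestPaymentInput{
+ Bucket: aws.String("my-bucket"),
+ RequestPaymentConfiguration: &RequestPaymentConfiguration{
+ Payer: aws.String("Requester"),
+ },
+ })
+ return err
+}
+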
+const opPutBucketTagging = "PutBucketTagging"
+
+// PutBucketTaggingRequest generates a request for the PutBucketTagging operation.
+func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) {
+ op := &request.Operation{
+ Name: opPutBucketTagging,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?tagging",
+ }
+
+ if input == nil {
+ input = &PutBucketTaggingInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketTaggingOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the tags for a bucket.
+func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) {
+ req, out := c.PutBucketTaggingRequest(input)
+ err := req.Send()
+ return out, err
+}
+
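+// Illustrative sketch, not part of the generated API: replacing a bucket's
+// tag set. Note that this overwrites any existing tags rather than merging
+// with them. Names are hypothetical; Tagging and Tag are defined later in
+// this file and the aws helper package is assumed to be imported.
+func examplePutBucketTagging(svc *S3) error {
+ _, err := svc.PutBucketTagging(&PutBucketTaggingInput{
+ Bucket: aws.String("my-bucket"),
+ Tagging: &Tagging{
+ TagSet: []*Tag{{
+ Key: aws.String("team"),
+ Value: aws.String("storage"),
+ }},
+ },
+ })
+ return err
+}
+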
+const opPutBucketVersioning = "PutBucketVersioning"
+
+// PutBucketVersioningRequest generates a request for the PutBucketVersioning operation.
+func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) {
+ op := &request.Operation{
+ Name: opPutBucketVersioning,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?versioning",
+ }
+
+ if input == nil {
+ input = &PutBucketVersioningInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketVersioningOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the versioning state of an existing bucket. To set the versioning state,
+// you must be the bucket owner.
+func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
+ req, out := c.PutBucketVersioningRequest(input)
+ err := req.Send()
+ return out, err
+}
+
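+// Illustrative sketch, not part of the generated API: enabling versioning on
+// a hypothetical bucket. Status is "Enabled" or "Suspended"; once enabled,
+// versioning can only be suspended, not removed. VersioningConfiguration is
+// defined later in this file and the aws helper package is assumed to be
+// imported.
+func examplePutBucketVersioning(svc *S3) error {
+ _, err := svc.PutBucketVersioning(&PutBucketVersioningInput{
+ Bucket: aws.String("my-bucket"),
+ VersioningConfiguration: &VersioningConfiguration{
+ Status: aws.String("Enabled"),
+ },
+ })
+ return err
+}
+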
+const opPutBucketWebsite = "PutBucketWebsite"
+
+// PutBucketWebsiteRequest generates a request for the PutBucketWebsite operation.
+func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) {
+ op := &request.Operation{
+ Name: opPutBucketWebsite,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}?website",
+ }
+
+ if input == nil {
+ input = &PutBucketWebsiteInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler)
+ req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler)
+ output = &PutBucketWebsiteOutput{}
+ req.Data = output
+ return
+}
+
+// Sets the website configuration for a bucket.
+func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
+ req, out := c.PutBucketWebsiteRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opPutObject = "PutObject"
+
+// PutObjectRequest generates a request for the PutObject operation.
+func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) {
+ op := &request.Operation{
+ Name: opPutObject,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &PutObjectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutObjectOutput{}
+ req.Data = output
+ return
+}
+
+// Adds an object to a bucket.
+func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
+ req, out := c.PutObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
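+// Illustrative sketch, not part of the generated API: uploading a small
+// object. Body is an io.ReadSeeker, so an in-memory bytes.Reader works. The
+// bucket and key are hypothetical; the bytes and aws helper packages are
+// assumed to be imported.
+func examplePutObject(svc *S3) error {
+ _, err := svc.PutObject(&PutObjectInput{
+ Bucket: aws.String("my-bucket"),
+ Key: aws.String("hello.txt"),
+ Body: bytes.NewReader([]byte("hello, world")),
+ ContentType: aws.String("text/plain"),
+ })
+ return err
+}
+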
+const opPutObjectAcl = "PutObjectAcl"
+
+// PutObjectAclRequest generates a request for the PutObjectAcl operation.
+func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) {
+ op := &request.Operation{
+ Name: opPutObjectAcl,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}?acl",
+ }
+
+ if input == nil {
+ input = &PutObjectAclInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &PutObjectAclOutput{}
+ req.Data = output
+ return
+}
+
+// Uses the acl subresource to set the access control list (ACL) permissions
+// for an object that already exists in a bucket.
+func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
+ req, out := c.PutObjectAclRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opRestoreObject = "RestoreObject"
+
+// RestoreObjectRequest generates a request for the RestoreObject operation.
+func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) {
+ op := &request.Operation{
+ Name: opRestoreObject,
+ HTTPMethod: "POST",
+ HTTPPath: "/{Bucket}/{Key+}?restore",
+ }
+
+ if input == nil {
+ input = &RestoreObjectInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &RestoreObjectOutput{}
+ req.Data = output
+ return
+}
+
+// Restores an archived copy of an object back into Amazon S3.
+func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
+ req, out := c.RestoreObjectRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+const opUploadPart = "UploadPart"
+
+// UploadPartRequest generates a request for the UploadPart operation.
+func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) {
+ op := &request.Operation{
+ Name: opUploadPart,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &UploadPartInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UploadPartOutput{}
+ req.Data = output
+ return
+}
+
+// Uploads a part in a multipart upload.
+//
+// Note: After you initiate a multipart upload and upload one or more parts,
+// you must either complete or abort the multipart upload in order to stop
+// getting charged for storage of the uploaded parts. Only after you either
+// complete or abort a multipart upload does Amazon S3 free up the parts
+// storage and stop charging you for it.
+func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
+ req, out := c.UploadPartRequest(input)
+ err := req.Send()
+ return out, err
+}
+
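+// Illustrative sketch, not part of the generated API, of the full flow the
+// note above describes: create the multipart upload, upload a part, then
+// complete it, aborting on failure so stored parts stop being billed. Names
+// are hypothetical; the bytes and aws helper packages are assumed to be
+// imported, and a real upload would loop over parts of at least 5 MB.
+func exampleMultipartUpload(svc *S3, data []byte) error {
+ create, err := svc.CreateMultipartUpload(&CreateMultipartUploadInput{
+ Bucket: aws.String("my-bucket"),
+ Key: aws.String("big-object"),
+ })
+ if err != nil {
+ return err
+ }
+ part, err := svc.UploadPart(&UploadPartInput{
+ Bucket: aws.String("my-bucket"),
+ Key: aws.String("big-object"),
+ UploadId: create.UploadId,
+ PartNumber: aws.Int64(1),
+ Body: bytes.NewReader(data),
+ })
+ if err != nil {
+ // Abort so Amazon S3 frees the stored parts and stops charging.
+ svc.AbortMultipartUpload(&AbortMultipartUploadInput{
+ Bucket: aws.String("my-bucket"),
+ Key: aws.String("big-object"),
+ UploadId: create.UploadId,
+ })
+ return err
+ }
+ _, err = svc.CompleteMultipartUpload(&CompleteMultipartUploadInput{
+ Bucket: aws.String("my-bucket"),
+ Key: aws.String("big-object"),
+ UploadId: create.UploadId,
+ MultipartUpload: &CompletedMultipartUpload{
+ Parts: []*CompletedPart{{ETag: part.ETag, PartNumber: aws.Int64(1)}},
+ },
+ })
+ return err
+}
+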
+const opUploadPartCopy = "UploadPartCopy"
+
+// UploadPartCopyRequest generates a request for the UploadPartCopy operation.
+func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) {
+ op := &request.Operation{
+ Name: opUploadPartCopy,
+ HTTPMethod: "PUT",
+ HTTPPath: "/{Bucket}/{Key+}",
+ }
+
+ if input == nil {
+ input = &UploadPartCopyInput{}
+ }
+
+ req = c.newRequest(op, input, output)
+ output = &UploadPartCopyOutput{}
+ req.Data = output
+ return
+}
+
+// Uploads a part by copying data from an existing object as the data source.
+func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
+ req, out := c.UploadPartCopyRequest(input)
+ err := req.Send()
+ return out, err
+}
+
+// Specifies the days since the initiation of an Incomplete Multipart Upload
+// that Lifecycle will wait before permanently removing all parts of the upload.
+type AbortIncompleteMultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the number of days that must pass since initiation for Lifecycle
+ // to abort an Incomplete Multipart Upload.
+ DaysAfterInitiation *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s AbortIncompleteMultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortIncompleteMultipartUpload) GoString() string {
+ return s.String()
+}
+
+type AbortMultipartUploadInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AbortMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AbortMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type AbortMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s AbortMultipartUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AbortMultipartUploadOutput) GoString() string {
+ return s.String()
+}
+
+type AccelerateConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // The accelerate configuration of the bucket.
+ Status *string `type:"string" enum:"BucketAccelerateStatus"`
+}
+
+// String returns the string representation
+func (s AccelerateConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccelerateConfiguration) GoString() string {
+ return s.String()
+}
+
+type AccessControlPolicy struct {
+ _ struct{} `type:"structure"`
+
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation
+func (s AccessControlPolicy) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AccessControlPolicy) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AccessControlPolicy) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"}
+ if s.Grants != nil {
+ for i, v := range s.Grants {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type Bucket struct {
+ _ struct{} `type:"structure"`
+
+ // Date the bucket was created.
+ CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // The name of the bucket.
+ Name *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Bucket) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Bucket) GoString() string {
+ return s.String()
+}
+
+type BucketLifecycleConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s BucketLifecycleConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BucketLifecycleConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BucketLifecycleConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"}
+ if s.Rules == nil {
+ invalidParams.Add(request.NewErrParamRequired("Rules"))
+ }
+ if s.Rules != nil {
+ for i, v := range s.Rules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type BucketLoggingStatus struct {
+ _ struct{} `type:"structure"`
+
+ LoggingEnabled *LoggingEnabled `type:"structure"`
+}
+
+// String returns the string representation
+func (s BucketLoggingStatus) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BucketLoggingStatus) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BucketLoggingStatus) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"}
+ if s.LoggingEnabled != nil {
+ if err := s.LoggingEnabled.Validate(); err != nil {
+ invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CORSConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s CORSConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CORSConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CORSConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"}
+ if s.CORSRules == nil {
+ invalidParams.Add(request.NewErrParamRequired("CORSRules"))
+ }
+ if s.CORSRules != nil {
+ for i, v := range s.CORSRules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CORSRule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies which headers are allowed in a pre-flight OPTIONS request.
+ AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"`
+
+ // Identifies HTTP methods that the domain/origin specified in the rule is allowed
+ // to execute.
+ AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"`
+
+ // One or more origins you want customers to be able to access the bucket from.
+ AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"`
+
+ // One or more headers in the response that you want customers to be able to
+ // access from their applications (for example, from a JavaScript XMLHttpRequest
+ // object).
+ ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"`
+
+ // The time in seconds that your browser is to cache the preflight response
+ // for the specified resource.
+ MaxAgeSeconds *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s CORSRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CORSRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CORSRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CORSRule"}
+ if s.AllowedMethods == nil {
+ invalidParams.Add(request.NewErrParamRequired("AllowedMethods"))
+ }
+ if s.AllowedOrigins == nil {
+ invalidParams.Add(request.NewErrParamRequired("AllowedOrigins"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
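+// Illustrative sketch, not part of the generated API: the Validate methods
+// in this file run entirely client-side, before a request is signed or sent,
+// and collect every problem into a single request.ErrInvalidParams. A
+// CORSRule missing its two required fields fails like this:
+func exampleCORSRuleValidate() {
+ rule := &CORSRule{} // AllowedMethods and AllowedOrigins left unset
+ if err := rule.Validate(); err != nil {
+ // The error reports both missing required parameters at once.
+ fmt.Println(err)
+ }
+}
+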
+type CloudFunctionConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ CloudFunction *string `type:"string"`
+
+ // Bucket event for which to send notifications.
+ Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ InvocationRole *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CloudFunctionConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CloudFunctionConfiguration) GoString() string {
+ return s.String()
+}
+
+type CommonPrefix struct {
+ _ struct{} `type:"structure"`
+
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CommonPrefix) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CommonPrefix) GoString() string {
+ return s.String()
+}
+
+type CompleteMultipartUploadInput struct {
+ _ struct{} `type:"structure" payload:"MultipartUpload"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s CompleteMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CompleteMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CompleteMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `type:"string"`
+
+ // Entity tag of the object.
+ ETag *string `type:"string"`
+
+ // If the object expiration is configured, this will contain the expiration
+ // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ Key *string `min:"1" type:"string"`
+
+ Location *string `type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s CompleteMultipartUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompleteMultipartUploadOutput) GoString() string {
+ return s.String()
+}
+
+type CompletedMultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s CompletedMultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompletedMultipartUpload) GoString() string {
+ return s.String()
+}
+
+type CompletedPart struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string `type:"string"`
+
+ // Part number that identifies the part. This is a positive integer between
+ // 1 and 10,000.
+ PartNumber *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s CompletedPart) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CompletedPart) GoString() string {
+ return s.String()
+}
+
+type Condition struct {
+ _ struct{} `type:"structure"`
+
+ // The HTTP error code when the redirect is applied. In the event of an error,
+ // if the error code equals this value, then the specified redirect is applied.
+ // Required when parent element Condition is specified and sibling KeyPrefixEquals
+ // is not specified. If both are specified, then both must be true for the redirect
+ // to be applied.
+ HttpErrorCodeReturnedEquals *string `type:"string"`
+
+ // The object key name prefix when the redirect is applied. For example, to
+ // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html.
+ // To redirect requests for all pages with the prefix docs/, the key prefix
+ // will be docs/, which identifies all objects in the docs/ folder. Required
+ // when the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals
+ // is not specified. If both conditions are specified, both must be true for
+ // the redirect to be applied.
+ KeyPrefixEquals *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Condition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Condition) GoString() string {
+ return s.String()
+}
+
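+// Illustrative sketch, not part of the generated API: a Condition is used
+// inside a website configuration's routing rules. This rule redirects any
+// request under a hypothetical docs/ prefix to a documents/ prefix; the
+// RoutingRule and Redirect types are defined later in this file, and the aws
+// helper package is assumed to be imported.
+func exampleRoutingRule() *RoutingRule {
+ return &RoutingRule{
+ Condition: &Condition{
+ KeyPrefixEquals: aws.String("docs/"),
+ },
+ Redirect: &Redirect{
+ ReplaceKeyPrefixWith: aws.String("documents/"),
+ },
+ }
+}
+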
+type CopyObjectInput struct {
+ _ struct{} `type:"structure"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The name of the source bucket and key name of the source object, separated
+ // by a slash (/). Must be URL-encoded.
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag.
+ CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether the metadata is copied from the source object or replaced
+ // with metadata provided in the request.
+ MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CopyObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CopyObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.CopySource == nil {
+ invalidParams.Add(request.NewErrParamRequired("CopySource"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CopyObjectOutput struct {
+ _ struct{} `type:"structure" payload:"CopyObjectResult"`
+
+ CopyObjectResult *CopyObjectResult `type:"structure"`
+
+ CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+ // If the object expiration is configured, the response includes this header.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Version ID of the newly created copy.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s CopyObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectOutput) GoString() string {
+ return s.String()
+}
+
+type CopyObjectResult struct {
+ _ struct{} `type:"structure"`
+
+ ETag *string `type:"string"`
+
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s CopyObjectResult) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyObjectResult) GoString() string {
+ return s.String()
+}
+
+type CopyPartResult struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag of the object.
+ ETag *string `type:"string"`
+
+ // Date and time at which the object was uploaded.
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+}
+
+// String returns the string representation
+func (s CopyPartResult) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CopyPartResult) GoString() string {
+ return s.String()
+}
+
+type CreateBucketConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the region where the bucket will be created. If you don't specify
+ // a region, the bucket will be created in US Standard.
+ LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+}
+
+// String returns the string representation
+func (s CreateBucketConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketConfiguration) GoString() string {
+ return s.String()
+}
+
+type CreateBucketInput struct {
+ _ struct{} `type:"structure" payload:"CreateBucketConfiguration"`
+
+ // The canned ACL to apply to the bucket.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateBucketOutput struct {
+ _ struct{} `type:"structure"`
+
+ Location *string `location:"header" locationName:"Location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateBucketOutput) GoString() string {
+ return s.String()
+}
+
+type CreateMultipartUploadInput struct {
+ _ struct{} `type:"structure"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateMultipartUploadInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type CreateMultipartUploadOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Date when the multipart upload will become eligible for an abort operation
+ // by lifecycle.
+ AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
+
+ // ID of the lifecycle rule that makes a multipart upload eligible for an
+ // abort operation.
+ AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `locationName:"Bucket" type:"string"`
+
+ // Object key for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // ID for the initiated multipart upload.
+ UploadId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadOutput) GoString() string {
+ return s.String()
+}
+
+type Delete struct {
+ _ struct{} `type:"structure"`
+
+ Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"`
+
+ // Element to enable quiet mode for the request. When you add this element,
+ // you must set its value to true.
+ Quiet *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s Delete) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Delete) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Delete) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Delete"}
+ if s.Objects == nil {
+ invalidParams.Add(request.NewErrParamRequired("Objects"))
+ }
+ if s.Objects != nil {
+ for i, v := range s.Objects {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteBucketCorsInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketCorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketCorsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketCorsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteBucketCorsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketCorsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketCorsOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteBucketLifecycleInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketLifecycleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketLifecycleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketLifecycleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteBucketLifecycleOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketLifecycleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketLifecycleOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteBucketPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketPolicyOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketReplicationInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketReplicationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketReplicationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketReplicationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteBucketReplicationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketReplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketReplicationOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketTaggingInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteBucketTaggingOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketTaggingOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteBucketWebsiteInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s DeleteBucketWebsiteInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketWebsiteInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteBucketWebsiteInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type DeleteBucketWebsiteOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s DeleteBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteMarkerEntry struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the object is (true) or is not (false) the latest version
+ // of an object.
+ IsLatest *bool `type:"boolean"`
+
+ // The object key.
+ Key *string `min:"1" type:"string"`
+
+ // Date and time the object was last modified.
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ Owner *Owner `type:"structure"`
+
+ // Version ID of an object.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteMarkerEntry) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteMarkerEntry) GoString() string {
+ return s.String()
+}
+
+type DeleteObjectInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
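+
+// Illustrative sketch, not generated code: Key is checked both for presence
+// and against the min:"1" constraint from its struct tag, so a non-nil empty
+// string still fails validation (hypothetical values):
+//
+//	in := &s3.DeleteObjectInput{
+//		Bucket: aws.String("my-bucket"),
+//		Key:    aws.String(""), // fails NewErrParamMinLen("Key", 1)
+//	}
+//	err := in.Validate() // non-nil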
+
+type DeleteObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether the versioned object that was permanently deleted was (true)
+ // or was not (false) a delete marker.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Returns the version ID of the delete marker created as a result of the DELETE
+ // operation.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s DeleteObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectOutput) GoString() string {
+ return s.String()
+}
+
+type DeleteObjectsInput struct {
+ _ struct{} `type:"structure" payload:"Delete"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Delete *Delete `locationName:"Delete" type:"structure" required:"true"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s DeleteObjectsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *DeleteObjectsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Delete == nil {
+ invalidParams.Add(request.NewErrParamRequired("Delete"))
+ }
+ if s.Delete != nil {
+ if err := s.Delete.Validate(); err != nil {
+ invalidParams.AddNested("Delete", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
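+
+// Illustrative sketch, not generated code: DeleteObjects batches deletions
+// (S3 accepts up to 1000 keys per request). svc is an assumed *s3.S3 client
+// and the names are hypothetical:
+//
+//	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//		Bucket: aws.String("my-bucket"),
+//		Delete: &s3.Delete{
+//			Objects: []*s3.ObjectIdentifier{
+//				{Key: aws.String("a")},
+//				{Key: aws.String("b")},
+//			},
+//		},
+//	})
+//	// on success, out.Deleted and out.Errors carry per-key results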
+
+type DeleteObjectsOutput struct {
+ _ struct{} `type:"structure"`
+
+ Deleted []*DeletedObject `type:"list" flattened:"true"`
+
+ Errors []*Error `locationName:"Error" type:"list" flattened:"true"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s DeleteObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeleteObjectsOutput) GoString() string {
+ return s.String()
+}
+
+type DeletedObject struct {
+ _ struct{} `type:"structure"`
+
+ DeleteMarker *bool `type:"boolean"`
+
+ DeleteMarkerVersionId *string `type:"string"`
+
+ Key *string `min:"1" type:"string"`
+
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s DeletedObject) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s DeletedObject) GoString() string {
+ return s.String()
+}
+
+type Destination struct {
+ _ struct{} `type:"structure"`
+
+ // Amazon Resource Name (ARN) of the bucket where you want Amazon S3 to store
+ // replicas of the object identified by the rule.
+ Bucket *string `type:"string" required:"true"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"StorageClass"`
+}
+
+// String returns the string representation
+func (s Destination) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Destination) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Destination) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Destination"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type Error struct {
+ _ struct{} `type:"structure"`
+
+ Code *string `type:"string"`
+
+ Key *string `min:"1" type:"string"`
+
+ Message *string `type:"string"`
+
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Error) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Error) GoString() string {
+ return s.String()
+}
+
+type ErrorDocument struct {
+ _ struct{} `type:"structure"`
+
+ // The object key name to use when a 4XX class error occurs.
+ Key *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ErrorDocument) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ErrorDocument) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ErrorDocument) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Container for a key-value pair that defines the criteria for the filter rule.
+type FilterRule struct {
+ _ struct{} `type:"structure"`
+
+ // Object key name prefix or suffix identifying one or more objects to which
+ // the filtering rule applies. Maximum prefix length can be up to 1,024 characters.
+ // Overlapping prefixes and suffixes are not supported. For more information,
+ // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ Name *string `type:"string" enum:"FilterRuleName"`
+
+ Value *string `type:"string"`
+}
+
+// String returns the string representation
+func (s FilterRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s FilterRule) GoString() string {
+ return s.String()
+}
+
+type GetBucketAccelerateConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the bucket for which the accelerate configuration is retrieved.
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketAccelerateConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAccelerateConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAccelerateConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetBucketAccelerateConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ // The accelerate configuration of the bucket.
+ Status *string `type:"string" enum:"BucketAccelerateStatus"`
+}
+
+// String returns the string representation
+func (s GetBucketAccelerateConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAccelerateConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type GetBucketAclInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetBucketAclOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketAclOutput) GoString() string {
+ return s.String()
+}
+
+type GetBucketCorsInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketCorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketCorsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketCorsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetBucketCorsOutput struct {
+ _ struct{} `type:"structure"`
+
+ CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketCorsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketCorsOutput) GoString() string {
+ return s.String()
+}
+
+type GetBucketLifecycleConfigurationInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLifecycleConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetBucketLifecycleConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+
+ Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type GetBucketLifecycleInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLifecycleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetBucketLifecycleOutput struct {
+ _ struct{} `type:"structure"`
+
+ Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLifecycleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLifecycleOutput) GoString() string {
+ return s.String()
+}
+
+type GetBucketLocationInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLocationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLocationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLocationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetBucketLocationOutput struct {
+ _ struct{} `type:"structure"`
+
+ LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"`
+}
+
+// String returns the string representation
+func (s GetBucketLocationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLocationOutput) GoString() string {
+ return s.String()
+}
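+
+// Note (well-known S3 behavior, not stated in the generated comments): buckets
+// in the US Standard region are reported with an empty LocationConstraint, so
+// callers commonly map the empty string back to "us-east-1". Given out, a
+// *GetBucketLocationOutput returned by GetBucketLocation:
+//
+//	region := "us-east-1"
+//	if out.LocationConstraint != nil && *out.LocationConstraint != "" {
+//		region = *out.LocationConstraint
+//	}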
+
+type GetBucketLoggingInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketLoggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLoggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketLoggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetBucketLoggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ LoggingEnabled *LoggingEnabled `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketLoggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketLoggingOutput) GoString() string {
+ return s.String()
+}
+
+type GetBucketNotificationConfigurationRequest struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the bucket to get the notification configuration for.
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketNotificationConfigurationRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketNotificationConfigurationRequest) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketNotificationConfigurationRequest) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetBucketPolicyInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetBucketPolicyOutput struct {
+ _ struct{} `type:"structure" payload:"Policy"`
+
+ // The bucket policy as a JSON document.
+ Policy *string `type:"string"`
+}
+
+// String returns the string representation
+func (s GetBucketPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketPolicyOutput) GoString() string {
+ return s.String()
+}
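+
+// Illustrative sketch, not generated code: Policy carries the raw JSON policy
+// document, which callers typically decode themselves (svc is an assumed
+// *s3.S3 client; encoding/json is assumed imported):
+//
+//	out, err := svc.GetBucketPolicy(&s3.GetBucketPolicyInput{Bucket: aws.String("my-bucket")})
+//	if err == nil && out.Policy != nil {
+//		var doc map[string]interface{}
+//		_ = json.Unmarshal([]byte(*out.Policy), &doc) // inspect statements
+//	}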
+
+type GetBucketReplicationInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketReplicationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketReplicationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketReplicationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetBucketReplicationOutput struct {
+ _ struct{} `type:"structure" payload:"ReplicationConfiguration"`
+
+ // Container for replication rules. You can add as many as 1,000 rules. Total
+ // replication configuration size can be up to 2 MB.
+ ReplicationConfiguration *ReplicationConfiguration `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetBucketReplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketReplicationOutput) GoString() string {
+ return s.String()
+}
+
+type GetBucketRequestPaymentInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketRequestPaymentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketRequestPaymentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketRequestPaymentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetBucketRequestPaymentOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies who pays for the download and request fees.
+ Payer *string `type:"string" enum:"Payer"`
+}
+
+// String returns the string representation
+func (s GetBucketRequestPaymentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketRequestPaymentOutput) GoString() string {
+ return s.String()
+}
+
+type GetBucketTaggingInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetBucketTaggingOutput struct {
+ _ struct{} `type:"structure"`
+
+ TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketTaggingOutput) GoString() string {
+ return s.String()
+}
+
+type GetBucketVersioningInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketVersioningInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketVersioningInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketVersioningInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetBucketVersioningOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA
+ // delete. If the bucket has never been so configured, this element is not returned.
+ MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"`
+
+ // The versioning state of the bucket.
+ Status *string `type:"string" enum:"BucketVersioningStatus"`
+}
+
+// String returns the string representation
+func (s GetBucketVersioningOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketVersioningOutput) GoString() string {
+ return s.String()
+}
+
+type GetBucketWebsiteInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s GetBucketWebsiteInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketWebsiteInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetBucketWebsiteInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetBucketWebsiteOutput struct {
+ _ struct{} `type:"structure"`
+
+ ErrorDocument *ErrorDocument `type:"structure"`
+
+ IndexDocument *IndexDocument `type:"structure"`
+
+ RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+ RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+}
+
+// String returns the string representation
+func (s GetBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+type GetObjectAclInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetObjectAclOutput struct {
+ _ struct{} `type:"structure"`
+
+ // A list of grants.
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"`
+
+ Owner *Owner `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s GetObjectAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectAclOutput) GoString() string {
+ return s.String()
+}
+
+type GetObjectInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified,
+ // otherwise return a 412 (precondition failed).
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Downloads the specified byte range of an object. For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Sets the Cache-Control header of the response.
+ ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"`
+
+ // Sets the Content-Disposition header of the response.
+ ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"`
+
+ // Sets the Content-Encoding header of the response.
+ ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"`
+
+ // Sets the Content-Language header of the response.
+ ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"`
+
+ // Sets the Content-Type header of the response.
+ ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"`
+
+ // Sets the Expires header of the response.
+ ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
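+
+// Illustrative sketch, not generated code: the If-* and Range fields map
+// directly onto their HTTP request headers, so a conditional partial read
+// looks like this (hypothetical values):
+//
+//	in := &s3.GetObjectInput{
+//		Bucket:      aws.String("my-bucket"),
+//		Key:         aws.String("my-key"),
+//		IfNoneMatch: aws.String(`"etag-from-last-fetch"`), // expect 304 if unchanged
+//		Range:       aws.String("bytes=0-1023"),           // first 1 KiB only
+//	}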
+
+type GetObjectOutput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+ // Object data.
+ Body io.ReadCloser `type:"blob"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
+
+ // The portion of the object returned in the response.
+ ContentRange *string `location:"header" locationName:"Content-Range" type:"string"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An ETag is an opaque identifier assigned by a web server to a specific version
+ // of a resource found at a URL.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key value pairs
+ // providing object expiration information. The value of the rule-id is URL
+ // encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ // Last modified date of the object
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Provides information about object restoration operation and expiration time
+ // of the restored object copy.
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s GetObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectOutput) GoString() string {
+ return s.String()
+}
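+
+// Illustrative sketch, not generated code: Body streams the HTTP response and
+// must be closed by the caller (svc is an assumed *s3.S3 client; io/ioutil is
+// assumed imported):
+//
+//	out, err := svc.GetObject(&s3.GetObjectInput{
+//		Bucket: aws.String("my-bucket"),
+//		Key:    aws.String("my-key"),
+//	})
+//	if err == nil {
+//		defer out.Body.Close()
+//		data, _ := ioutil.ReadAll(out.Body)
+//		_ = data // use the object bytes
+//	}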
+
+type GetObjectTorrentInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+}
+
+// String returns the string representation
+func (s GetObjectTorrentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTorrentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetObjectTorrentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type GetObjectTorrentOutput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ Body io.ReadCloser `type:"blob"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s GetObjectTorrentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetObjectTorrentOutput) GoString() string {
+ return s.String()
+}
+
+type Grant struct {
+ _ struct{} `type:"structure"`
+
+ Grantee *Grantee `type:"structure"`
+
+ // Specifies the permission given to the grantee.
+ Permission *string `type:"string" enum:"Permission"`
+}
+
+// String returns the string representation
+func (s Grant) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Grant) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Grant) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Grant"}
+ if s.Grantee != nil {
+ if err := s.Grantee.Validate(); err != nil {
+ invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type Grantee struct {
+ _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
+
+ // Screen name of the grantee.
+ DisplayName *string `type:"string"`
+
+ // Email address of the grantee.
+ EmailAddress *string `type:"string"`
+
+ // The canonical user ID of the grantee.
+ ID *string `type:"string"`
+
+ // Type of grantee
+ Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"`
+
+ // URI of the grantee group.
+ URI *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Grantee) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Grantee) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Grantee) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Grantee"}
+ if s.Type == nil {
+ invalidParams.Add(request.NewErrParamRequired("Type"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
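+
+// Illustrative sketch, not generated code: only Type is required; which of
+// the other identifiers applies depends on the grantee kind, e.g. a URI for
+// a predefined group:
+//
+//	g := &s3.Grantee{
+//		Type: aws.String(s3.TypeGroup),
+//		URI:  aws.String("http://acs.amazonaws.com/groups/global/AllUsers"),
+//	}
+//	err := g.Validate() // nil: Type is present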
+
+type HeadBucketInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s HeadBucketInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadBucketInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadBucketInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type HeadBucketOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s HeadBucketOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadBucketOutput) GoString() string {
+ return s.String()
+}
+
+type HeadObjectInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Return the object only if its entity tag (ETag) is the same as the one specified,
+ // otherwise return a 412 (precondition failed).
+ IfMatch *string `location:"header" locationName:"If-Match" type:"string"`
+
+ // Return the object only if it has been modified since the specified time,
+ // otherwise return a 304 (not modified).
+ IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Return the object only if its entity tag (ETag) is different from the one
+ // specified, otherwise return a 304 (not modified).
+ IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"`
+
+ // Return the object only if it has not been modified since the specified time,
+ // otherwise return a 412 (precondition failed).
+ IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Downloads the specified byte range of an object. For more information about
+ // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.
+ Range *string `location:"header" locationName:"Range" type:"string"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *HeadObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type HeadObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // Specifies whether the object retrieved was (true) or was not (false) a Delete
+ // Marker. If false, this response header does not appear in the response.
+ DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"`
+
+ // An ETag is an opaque identifier assigned by a web server to a specific version
+ // of a resource found at a URL.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured (see PUT Bucket lifecycle), the response
+ // includes this header. It includes the expiry-date and rule-id key value pairs
+ // providing object expiration information. The value of the rule-id is URL
+ // encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *string `location:"header" locationName:"Expires" type:"string"`
+
+ // Last modified date of the object
+ LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // This is set to the number of metadata entries not returned in x-amz-meta
+ // headers. This can happen if you create metadata using an API like SOAP that
+ // supports more flexible metadata than the REST API. For example, using SOAP,
+ // you can create metadata whose values are not legal HTTP headers.
+ MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"`
+
+ ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // Provides information about object restoration operation and expiration time
+ // of the restored object copy.
+ Restore *string `location:"header" locationName:"x-amz-restore" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s HeadObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s HeadObjectOutput) GoString() string {
+ return s.String()
+}
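+
+// A minimal usage sketch (not generated code): fetching object metadata with
+// HeadObject and reading ContentLength from the response. It assumes the *S3
+// client defined elsewhere in this package; bucket and key are caller-supplied.
+func exampleObjectSize(svc *S3, bucket, key string) (int64, error) {
+ out, err := svc.HeadObject(&HeadObjectInput{Bucket: &bucket, Key: &key})
+ if err != nil {
+ return 0, err
+ }
+ if out.ContentLength == nil {
+ return 0, nil
+ }
+ return *out.ContentLength, nil
+}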
+
+type IndexDocument struct {
+ _ struct{} `type:"structure"`
+
+ // A suffix that is appended to a request that is for a directory on the website
+ // endpoint (e.g., if the suffix is index.html and you make a request to
+ // samplebucket/images/, the data returned will be for the object with the key
+ // name images/index.html). The suffix must not be empty and must not include
+ // a slash character.
+ Suffix *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s IndexDocument) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s IndexDocument) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *IndexDocument) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "IndexDocument"}
+ if s.Suffix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Suffix"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
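+
+// Illustrative sketch (not generated code): Validate catches missing required
+// parameters client-side, before any request is sent. An empty IndexDocument
+// fails because Suffix is required.
+func exampleIndexDocumentValidate() {
+ doc := &IndexDocument{}
+ if err := doc.Validate(); err != nil {
+ fmt.Println(err) // reports the missing Suffix parameter
+ }
+}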
+
+type Initiator struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the Principal.
+ DisplayName *string `type:"string"`
+
+ // If the principal is an AWS account, it provides the Canonical User ID. If
+ // the principal is an IAM User, it provides a user ARN value.
+ ID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Initiator) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Initiator) GoString() string {
+ return s.String()
+}
+
+// Container for object key name prefix and suffix filtering rules.
+type KeyFilter struct {
+ _ struct{} `type:"structure"`
+
+ // A list of containers for key-value pairs that define the criteria for the
+ // filter rule.
+ FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s KeyFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s KeyFilter) GoString() string {
+ return s.String()
+}
+
+// Container for specifying the AWS Lambda notification configuration.
+type LambdaFunctionConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Container for object key name filtering rules. For information about key
+ // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ Filter *NotificationConfigurationFilter `type:"structure"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // Lambda cloud function ARN that Amazon S3 can invoke when it detects events
+ // of the specified type.
+ LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s LambdaFunctionConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LambdaFunctionConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LambdaFunctionConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"}
+ if s.Events == nil {
+ invalidParams.Add(request.NewErrParamRequired("Events"))
+ }
+ if s.LambdaFunctionArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type LifecycleConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s LifecycleConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"}
+ if s.Rules == nil {
+ invalidParams.Add(request.NewErrParamRequired("Rules"))
+ }
+ if s.Rules != nil {
+ for i, v := range s.Rules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type LifecycleExpiration struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the date on which the object is to be moved or deleted. The date
+ // should be in GMT ISO 8601 format.
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Indicates the lifetime, in days, of the objects that are subject to the rule.
+ // The value must be a non-zero positive integer.
+ Days *int64 `type:"integer"`
+
+ // Indicates whether Amazon S3 will remove a delete marker with no noncurrent
+ // versions. If set to true, the delete marker will be expired; if set to false,
+ // the policy takes no action. This cannot be specified with Days or Date in
+ // a Lifecycle Expiration Policy.
+ ExpiredObjectDeleteMarker *bool `type:"boolean"`
+}
+
+// String returns the string representation
+func (s LifecycleExpiration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleExpiration) GoString() string {
+ return s.String()
+}
+
+type LifecycleRule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the days since the initiation of an Incomplete Multipart Upload
+ // that Lifecycle will wait before permanently removing all parts of the upload.
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+ Expiration *LifecycleExpiration `type:"structure"`
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string `type:"string"`
+
+ // Specifies when noncurrent object versions expire. Upon expiration, Amazon
+ // S3 permanently deletes the noncurrent object versions. You set this lifecycle
+ // configuration action on a bucket that has versioning enabled (or suspended)
+ // to request that Amazon S3 delete noncurrent object versions at a specific
+ // period in the object's lifetime.
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+ NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"`
+
+ // Prefix identifying one or more objects to which the rule applies.
+ Prefix *string `type:"string" required:"true"`
+
+ // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
+ // is not currently being applied.
+ Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
+
+ Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s LifecycleRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LifecycleRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LifecycleRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"}
+ if s.Prefix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Prefix"))
+ }
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
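+
+// Illustrative sketch (not generated code): building a minimal LifecycleRule
+// that expires objects under a prefix after 30 days, then validating it. The
+// prefix and day count are made-up example values.
+func exampleLifecycleRule() (*LifecycleRule, error) {
+ days := int64(30)
+ prefix := "logs/"
+ status := "Enabled"
+ rule := &LifecycleRule{
+ Expiration: &LifecycleExpiration{Days: &days},
+ Prefix: &prefix,
+ Status: &status,
+ }
+ if err := rule.Validate(); err != nil {
+ return nil, err
+ }
+ return rule, nil
+}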
+
+type ListBucketsInput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListBucketsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketsInput) GoString() string {
+ return s.String()
+}
+
+type ListBucketsOutput struct {
+ _ struct{} `type:"structure"`
+
+ Buckets []*Bucket `locationNameList:"Bucket" type:"list"`
+
+ Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListBucketsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketsOutput) GoString() string {
+ return s.String()
+}
+
+type ListMultipartUploadsInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+ // however, an XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // Together with upload-id-marker, this parameter specifies the multipart upload
+ // after which listing should begin.
+ KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+ // Sets the maximum number of multipart uploads, from 1 to 1,000, to return
+ // in the response body. 1,000 is the maximum number of uploads that can be
+ // returned in a response.
+ MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"`
+
+ // Lists in-progress uploads only for those keys that begin with the specified
+ // prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Together with key-marker, specifies the multipart upload after which listing
+ // should begin. If key-marker is not specified, the upload-id-marker parameter
+ // is ignored.
+ UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"`
+}
+
+// String returns the string representation
+func (s ListMultipartUploadsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListMultipartUploadsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListMultipartUploadsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListMultipartUploadsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `type:"string"`
+
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // Indicates whether the returned list of multipart uploads is truncated. A
+ // value of true indicates that the list was truncated. The list can be truncated
+ // if the number of multipart uploads exceeds the limit allowed or specified
+ // by MaxUploads.
+ IsTruncated *bool `type:"boolean"`
+
+ // The key at or after which the listing began.
+ KeyMarker *string `type:"string"`
+
+ // Maximum number of multipart uploads that could have been included in the
+ // response.
+ MaxUploads *int64 `type:"integer"`
+
+ // When a list is truncated, this element specifies the value that should be
+ // used for the key-marker request parameter in a subsequent request.
+ NextKeyMarker *string `type:"string"`
+
+ // When a list is truncated, this element specifies the value that should be
+ // used for the upload-id-marker request parameter in a subsequent request.
+ NextUploadIdMarker *string `type:"string"`
+
+ // When a prefix is provided in the request, this field contains the specified
+ // prefix. The result contains only keys starting with the specified prefix.
+ Prefix *string `type:"string"`
+
+ // Upload ID after which listing began.
+ UploadIdMarker *string `type:"string"`
+
+ Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s ListMultipartUploadsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListMultipartUploadsOutput) GoString() string {
+ return s.String()
+}
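+
+// Illustrative sketch (not generated code): draining all in-progress multipart
+// uploads for a bucket by feeding NextKeyMarker and NextUploadIdMarker back
+// into the next request while IsTruncated is true. Assumes the *S3 client
+// defined elsewhere in this package.
+func exampleListAllMultipartUploads(svc *S3, bucket string) ([]*MultipartUpload, error) {
+ var uploads []*MultipartUpload
+ input := &ListMultipartUploadsInput{Bucket: &bucket}
+ for {
+ out, err := svc.ListMultipartUploads(input)
+ if err != nil {
+ return nil, err
+ }
+ uploads = append(uploads, out.Uploads...)
+ if out.IsTruncated == nil || !*out.IsTruncated {
+ return uploads, nil
+ }
+ input.KeyMarker = out.NextKeyMarker
+ input.UploadIdMarker = out.NextUploadIdMarker
+ }
+}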
+
+type ListObjectVersionsInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+ // however, an XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // Specifies the key to start with when listing objects in a bucket.
+ KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // Specifies the object version you want to start listing from.
+ VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectVersionsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectVersionsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectVersionsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListObjectVersionsOutput struct {
+ _ struct{} `type:"structure"`
+
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria. If your results were truncated, you can
+ // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker
+ // response parameters as a starting place in another request to return the
+ // rest of the results.
+ IsTruncated *bool `type:"boolean"`
+
+ // Marks the last Key returned in a truncated response.
+ KeyMarker *string `type:"string"`
+
+ MaxKeys *int64 `type:"integer"`
+
+ Name *string `type:"string"`
+
+ // Use this value for the key marker request parameter in a subsequent request.
+ NextKeyMarker *string `type:"string"`
+
+ // Use this value for the next version id marker parameter in a subsequent request.
+ NextVersionIdMarker *string `type:"string"`
+
+ Prefix *string `type:"string"`
+
+ VersionIdMarker *string `type:"string"`
+
+ Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s ListObjectVersionsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectVersionsOutput) GoString() string {
+ return s.String()
+}
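+
+// Illustrative sketch (not generated code): paginating object versions with
+// NextKeyMarker and NextVersionIdMarker, per the IsTruncated comment above.
+// Assumes the *S3 client defined elsewhere in this package.
+func exampleListAllVersions(svc *S3, bucket string) ([]*ObjectVersion, error) {
+ var versions []*ObjectVersion
+ input := &ListObjectVersionsInput{Bucket: &bucket}
+ for {
+ out, err := svc.ListObjectVersions(input)
+ if err != nil {
+ return nil, err
+ }
+ versions = append(versions, out.Versions...)
+ if out.IsTruncated == nil || !*out.IsTruncated {
+ return versions, nil
+ }
+ input.KeyMarker = out.NextKeyMarker
+ input.VersionIdMarker = out.NextVersionIdMarker
+ }
+}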
+
+type ListObjectsInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Requests Amazon S3 to encode the object keys in the response and specifies
+ // the encoding method to use. An object key may contain any Unicode character;
+ // however, an XML 1.0 parser cannot parse some characters, such as characters
+ // with an ASCII value from 0 to 10. For characters that are not supported in
+ // XML 1.0, you can add this parameter to request that Amazon S3 encode the
+ // keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // Specifies the key to start with when listing objects in a bucket.
+ Marker *string `location:"querystring" locationName:"marker" type:"string"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListObjectsOutput struct {
+ _ struct{} `type:"structure"`
+
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ Contents []*Object `type:"list" flattened:"true"`
+
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ Marker *string `type:"string"`
+
+ MaxKeys *int64 `type:"integer"`
+
+ Name *string `type:"string"`
+
+ // When the response is truncated (the IsTruncated element value in the response
+ // is true), you can use the key name in this field as the marker in a subsequent
+ // request to get the next set of objects. Amazon S3 lists objects in alphabetical
+ // order. Note: this element is returned only if you specified the delimiter
+ // request parameter. If the response is truncated and does not include NextMarker,
+ // you can use the value of the last Key in the response as the marker in a
+ // subsequent request to get the next set of object keys.
+ NextMarker *string `type:"string"`
+
+ Prefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsOutput) GoString() string {
+ return s.String()
+}
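+
+// Illustrative sketch (not generated code): v1 marker-based pagination. Per the
+// NextMarker comment above, NextMarker is only returned when a delimiter is
+// specified; otherwise the last Key of the page serves as the next marker.
+// Assumes the *S3 client defined elsewhere in this package.
+func exampleListAllObjects(svc *S3, bucket string) ([]*Object, error) {
+ var objects []*Object
+ input := &ListObjectsInput{Bucket: &bucket}
+ for {
+ out, err := svc.ListObjects(input)
+ if err != nil {
+ return nil, err
+ }
+ objects = append(objects, out.Contents...)
+ if out.IsTruncated == nil || !*out.IsTruncated {
+ return objects, nil
+ }
+ if out.NextMarker != nil {
+ input.Marker = out.NextMarker
+ } else if n := len(out.Contents); n > 0 {
+ input.Marker = out.Contents[n-1].Key
+ } else {
+ return objects, nil
+ }
+ }
+}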
+
+type ListObjectsV2Input struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the bucket to list.
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued on
+ // this bucket with a token. ContinuationToken is obfuscated and is not a real
+ // key.
+ ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"`
+
+ // The owner field is not present in ListObjectsV2 responses by default. To
+ // return the owner field with each key in the result, set FetchOwner to true.
+ FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ StartAfter *string `location:"querystring" locationName:"start-after" type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Input) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Input) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListObjectsV2Input) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListObjectsV2Output struct {
+ _ struct{} `type:"structure"`
+
+ // CommonPrefixes contains all (if there are any) keys between Prefix and the
+ // next occurrence of the string specified by the delimiter.
+ CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"`
+
+ // Metadata about each object returned.
+ Contents []*Object `type:"list" flattened:"true"`
+
+ // ContinuationToken indicates to Amazon S3 that the list is being continued on
+ // this bucket with a token. ContinuationToken is obfuscated and is not a real
+ // key.
+ ContinuationToken *string `type:"string"`
+
+ // A delimiter is a character you use to group keys.
+ Delimiter *string `type:"string"`
+
+ // Encoding type used by Amazon S3 to encode object keys in the response.
+ EncodingType *string `type:"string" enum:"EncodingType"`
+
+ // A flag that indicates whether or not Amazon S3 returned all of the results
+ // that satisfied the search criteria.
+ IsTruncated *bool `type:"boolean"`
+
+ // KeyCount is the number of keys returned with this request. KeyCount will
+ // always be less than or equal to the MaxKeys field. For example, if you ask
+ // for 50 keys, your result will include 50 keys or fewer.
+ KeyCount *int64 `type:"integer"`
+
+ // Sets the maximum number of keys returned in the response. The response might
+ // contain fewer keys but will never contain more.
+ MaxKeys *int64 `type:"integer"`
+
+ // Name of the bucket to list.
+ Name *string `type:"string"`
+
+ // NextContinuationToken is sent when IsTruncated is true, which means there
+ // are more keys in the bucket that can be listed. The next list request to
+ // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken
+ // is obfuscated and is not a real key.
+ NextContinuationToken *string `type:"string"`
+
+ // Limits the response to keys that begin with the specified prefix.
+ Prefix *string `type:"string"`
+
+ // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts
+ // listing after this specified key. StartAfter can be any key in the bucket.
+ StartAfter *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListObjectsV2Output) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListObjectsV2Output) GoString() string {
+ return s.String()
+}
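+
+// Illustrative sketch (not generated code): token-based pagination with
+// ListObjectsV2, feeding NextContinuationToken back as ContinuationToken
+// while IsTruncated is true. Assumes the *S3 client defined elsewhere in
+// this package.
+func exampleListAllObjectsV2(svc *S3, bucket string) ([]*Object, error) {
+ var objects []*Object
+ input := &ListObjectsV2Input{Bucket: &bucket}
+ for {
+ out, err := svc.ListObjectsV2(input)
+ if err != nil {
+ return nil, err
+ }
+ objects = append(objects, out.Contents...)
+ if out.IsTruncated == nil || !*out.IsTruncated {
+ return objects, nil
+ }
+ input.ContinuationToken = out.NextContinuationToken
+ }
+}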
+
+type ListPartsInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Sets the maximum number of parts to return.
+ MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"`
+
+ // Specifies the part after which listing should begin. Only parts with higher
+ // part numbers will be listed.
+ PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"`
+
+ // Confirms that the requester knows that they will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s ListPartsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ListPartsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ListPartsOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Date when the multipart upload will become eligible for the abort operation
+ // under a lifecycle rule.
+ AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"`
+
+ // ID of the lifecycle rule that makes a multipart upload eligible for the
+ // abort operation.
+ AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `type:"string"`
+
+ // Identifies who initiated the multipart upload.
+ Initiator *Initiator `type:"structure"`
+
+ // Indicates whether the returned list of parts is truncated.
+ IsTruncated *bool `type:"boolean"`
+
+ // Object key for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ // Maximum number of parts that were allowed in the response.
+ MaxParts *int64 `type:"integer"`
+
+ // When a list is truncated, this element specifies the last part in the list,
+ // as well as the value to use for the part-number-marker request parameter
+ // in a subsequent request.
+ NextPartNumberMarker *int64 `type:"integer"`
+
+ Owner *Owner `type:"structure"`
+
+ // Part number after which listing begins.
+ PartNumberMarker *int64 `type:"integer"`
+
+ Parts []*Part `locationName:"Part" type:"list" flattened:"true"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"StorageClass"`
+
+ // Upload ID identifying the multipart upload whose parts are being listed.
+ UploadId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListPartsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListPartsOutput) GoString() string {
+ return s.String()
+}
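+
+// Illustrative sketch (not generated code): listing every part of a multipart
+// upload by advancing PartNumberMarker to NextPartNumberMarker while the
+// response is truncated. Assumes the *S3 client defined elsewhere in this
+// package.
+func exampleListAllParts(svc *S3, bucket, key, uploadId string) ([]*Part, error) {
+ var parts []*Part
+ input := &ListPartsInput{Bucket: &bucket, Key: &key, UploadId: &uploadId}
+ for {
+ out, err := svc.ListParts(input)
+ if err != nil {
+ return nil, err
+ }
+ parts = append(parts, out.Parts...)
+ if out.IsTruncated == nil || !*out.IsTruncated {
+ return parts, nil
+ }
+ input.PartNumberMarker = out.NextPartNumberMarker
+ }
+}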
+
+type LoggingEnabled struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the bucket where you want Amazon S3 to store server access logs.
+ // You can have your logs delivered to any bucket that you own, including the
+ // same bucket that is being logged. You can also configure multiple buckets
+ // to deliver their logs to the same target bucket. In this case you should
+ // choose a different TargetPrefix for each source bucket so that the delivered
+ // log files can be distinguished by key.
+ TargetBucket *string `type:"string"`
+
+ TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"`
+
+ // This element lets you specify a prefix for the keys that the log files will
+ // be stored under.
+ TargetPrefix *string `type:"string"`
+}
+
+// String returns the string representation
+func (s LoggingEnabled) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s LoggingEnabled) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *LoggingEnabled) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"}
+ if s.TargetGrants != nil {
+ for i, v := range s.TargetGrants {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type MultipartUpload struct {
+ _ struct{} `type:"structure"`
+
+ // Date and time at which the multipart upload was initiated.
+ Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Identifies who initiated the multipart upload.
+ Initiator *Initiator `type:"structure"`
+
+ // Key of the object for which the multipart upload was initiated.
+ Key *string `min:"1" type:"string"`
+
+ Owner *Owner `type:"structure"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"StorageClass"`
+
+ // Upload ID that identifies the multipart upload.
+ UploadId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s MultipartUpload) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s MultipartUpload) GoString() string {
+ return s.String()
+}
+
+// Specifies when noncurrent object versions expire. Upon expiration, Amazon
+// S3 permanently deletes the noncurrent object versions. You set this lifecycle
+// configuration action on a bucket that has versioning enabled (or suspended)
+// to request that Amazon S3 delete noncurrent object versions at a specific
+// period in the object's lifetime.
+type NoncurrentVersionExpiration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of days an object is noncurrent before Amazon S3 can
+ // perform the associated action. For information about the noncurrent days
+ // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
+ // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage
+ // Service Developer Guide.
+ NoncurrentDays *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s NoncurrentVersionExpiration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NoncurrentVersionExpiration) GoString() string {
+ return s.String()
+}
+
+// Container for the transition rule that describes when noncurrent objects
+// transition to the STANDARD_IA or GLACIER storage class. If your bucket is
+// versioning-enabled (or versioning is suspended), you can set this action
+// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA
+// or GLACIER storage class at a specific period in the object's lifetime.
+type NoncurrentVersionTransition struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the number of days an object is noncurrent before Amazon S3 can
+ // perform the associated action. For information about the noncurrent days
+ // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent
+ // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage
+ // Service Developer Guide.
+ NoncurrentDays *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"TransitionStorageClass"`
+}
+
+// String returns the string representation
+func (s NoncurrentVersionTransition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NoncurrentVersionTransition) GoString() string {
+ return s.String()
+}
+
+// Container for specifying the notification configuration of the bucket. If
+// this element is empty, notifications are turned off on the bucket.
+type NotificationConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"`
+
+ QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"`
+
+ TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"`
+}
+
+// String returns the string representation
+func (s NotificationConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotificationConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *NotificationConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"}
+ if s.LambdaFunctionConfigurations != nil {
+ for i, v := range s.LambdaFunctionConfigurations {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.QueueConfigurations != nil {
+ for i, v := range s.QueueConfigurations {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+ if s.TopicConfigurations != nil {
+ for i, v := range s.TopicConfigurations {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
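+
+// Illustrative sketch (not generated code): assembling a notification
+// configuration that invokes a Lambda function on object creation, then
+// validating it. The event name follows the S3 event format; the ARN is a
+// made-up placeholder.
+func exampleNotificationConfiguration() error {
+ event := "s3:ObjectCreated:*"
+ arn := "arn:aws:lambda:us-east-1:123456789012:function:example" // hypothetical
+ cfg := &NotificationConfiguration{
+ LambdaFunctionConfigurations: []*LambdaFunctionConfiguration{{
+ Events: []*string{&event},
+ LambdaFunctionArn: &arn,
+ }},
+ }
+ return cfg.Validate()
+}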
+
+type NotificationConfigurationDeprecated struct {
+ _ struct{} `type:"structure"`
+
+ CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"`
+
+ QueueConfiguration *QueueConfigurationDeprecated `type:"structure"`
+
+ TopicConfiguration *TopicConfigurationDeprecated `type:"structure"`
+}
+
+// String returns the string representation
+func (s NotificationConfigurationDeprecated) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotificationConfigurationDeprecated) GoString() string {
+ return s.String()
+}
+
+// Container for object key name filtering rules. For information about key
+// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+// in the Amazon Simple Storage Service Developer Guide.
+type NotificationConfigurationFilter struct {
+ _ struct{} `type:"structure"`
+
+ // Container for object key name prefix and suffix filtering rules.
+ Key *KeyFilter `locationName:"S3Key" type:"structure"`
+}
+
+// String returns the string representation
+func (s NotificationConfigurationFilter) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s NotificationConfigurationFilter) GoString() string {
+ return s.String()
+}
+
+type Object struct {
+ _ struct{} `type:"structure"`
+
+ ETag *string `type:"string"`
+
+ Key *string `min:"1" type:"string"`
+
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ Owner *Owner `type:"structure"`
+
+ Size *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"ObjectStorageClass"`
+}
+
+// String returns the string representation
+func (s Object) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Object) GoString() string {
+ return s.String()
+}
+
+type ObjectIdentifier struct {
+ _ struct{} `type:"structure"`
+
+ // Key name of the object to delete.
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // VersionId for the specific version of the object to delete.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ObjectIdentifier) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ObjectIdentifier) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ObjectIdentifier) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ObjectVersion struct {
+ _ struct{} `type:"structure"`
+
+ ETag *string `type:"string"`
+
+ // Specifies whether the object is (true) or is not (false) the latest version
+ // of an object.
+ IsLatest *bool `type:"boolean"`
+
+ // The object key.
+ Key *string `min:"1" type:"string"`
+
+ // Date and time the object was last modified.
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ Owner *Owner `type:"structure"`
+
+ // Size in bytes of the object.
+ Size *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"`
+
+ // Version ID of an object.
+ VersionId *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ObjectVersion) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ObjectVersion) GoString() string {
+ return s.String()
+}
+
+type Owner struct {
+ _ struct{} `type:"structure"`
+
+ DisplayName *string `type:"string"`
+
+ ID *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Owner) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Owner) GoString() string {
+ return s.String()
+}
+
+type Part struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag returned when the part was uploaded.
+ ETag *string `type:"string"`
+
+ // Date and time at which the part was uploaded.
+ LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Part number identifying the part. This is a positive integer between 1 and
+ // 10,000.
+ PartNumber *int64 `type:"integer"`
+
+ // Size of the uploaded part data.
+ Size *int64 `type:"integer"`
+}
+
+// String returns the string representation
+func (s Part) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Part) GoString() string {
+ return s.String()
+}
+
+type PutBucketAccelerateConfigurationInput struct {
+ _ struct{} `type:"structure" payload:"AccelerateConfiguration"`
+
+ // Specifies the Accelerate Configuration you want to set for the bucket.
+ AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"`
+
+ // Name of the bucket for which the accelerate configuration is set.
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketAccelerateConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAccelerateConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketAccelerateConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"}
+ if s.AccelerateConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration"))
+ }
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type PutBucketAccelerateConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketAccelerateConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAccelerateConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketAclInput struct {
+ _ struct{} `type:"structure" payload:"AccessControlPolicy"`
+
+ // The canned ACL to apply to the bucket.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
+
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+}
+
+// String returns the string representation
+func (s PutBucketAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.AccessControlPolicy != nil {
+ if err := s.AccessControlPolicy.Validate(); err != nil {
+ invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
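+
+// Illustrative sketch (not generated code): applying a canned ACL to a bucket.
+// "private" is one of the BucketCannedACL enum values. Assumes the *S3 client
+// defined elsewhere in this package.
+func exampleSetBucketACL(svc *S3, bucket string) error {
+ acl := "private"
+ _, err := svc.PutBucketAcl(&PutBucketAclInput{
+ ACL: &acl,
+ Bucket: &bucket,
+ })
+ return err
+}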
+
+type PutBucketAclOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketAclOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketCorsInput struct {
+ _ struct{} `type:"structure" payload:"CORSConfiguration"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketCorsInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketCorsInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketCorsInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.CORSConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("CORSConfiguration"))
+ }
+ if s.CORSConfiguration != nil {
+ if err := s.CORSConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type PutBucketCorsOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketCorsOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketCorsOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketLifecycleConfigurationInput struct {
+ _ struct{} `type:"structure" payload:"LifecycleConfiguration"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLifecycleConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.LifecycleConfiguration != nil {
+ if err := s.LifecycleConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type PutBucketLifecycleConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketLifecycleInput struct {
+ _ struct{} `type:"structure" payload:"LifecycleConfiguration"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLifecycleInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.LifecycleConfiguration != nil {
+ if err := s.LifecycleConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type PutBucketLifecycleOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLifecycleOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLifecycleOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketLoggingInput struct {
+ _ struct{} `type:"structure" payload:"BucketLoggingStatus"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketLoggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLoggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketLoggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.BucketLoggingStatus == nil {
+ invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus"))
+ }
+ if s.BucketLoggingStatus != nil {
+ if err := s.BucketLoggingStatus.Validate(); err != nil {
+ invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type PutBucketLoggingOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketLoggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketLoggingOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketNotificationConfigurationInput struct {
+ _ struct{} `type:"structure" payload:"NotificationConfiguration"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Container for specifying the notification configuration of the bucket. If
+ // this element is empty, notifications are turned off on the bucket.
+ NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationConfigurationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationConfigurationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketNotificationConfigurationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.NotificationConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration"))
+ }
+ if s.NotificationConfiguration != nil {
+ if err := s.NotificationConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type PutBucketNotificationConfigurationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationConfigurationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationConfigurationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketNotificationInput struct {
+ _ struct{} `type:"structure" payload:"NotificationConfiguration"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketNotificationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.NotificationConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type PutBucketNotificationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketNotificationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketNotificationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketPolicyInput struct {
+ _ struct{} `type:"structure" payload:"Policy"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The bucket policy as a JSON document.
+ Policy *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketPolicyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketPolicyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketPolicyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Policy == nil {
+ invalidParams.Add(request.NewErrParamRequired("Policy"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
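+// Illustrative usage sketch (editor's addition, not part of the generated
+// file): applying a bucket policy with an assumed client svc; the bucket name
+// and the empty policy document are placeholders.
+//
+//   policy := `{"Version": "2012-10-17", "Statement": []}`
+//   _, err := svc.PutBucketPolicy(&s3.PutBucketPolicyInput{
+//       Bucket: aws.String("example-bucket"),
+//       Policy: aws.String(policy),
+//   })
+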
+type PutBucketPolicyOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketPolicyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketPolicyOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketReplicationInput struct {
+ _ struct{} `type:"structure" payload:"ReplicationConfiguration"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Container for replication rules. You can add as many as 1,000 rules. Total
+ // replication configuration size can be up to 2 MB.
+ ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketReplicationInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketReplicationInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketReplicationInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.ReplicationConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration"))
+ }
+ if s.ReplicationConfiguration != nil {
+ if err := s.ReplicationConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
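+// Illustrative usage sketch (editor's addition, not part of the generated
+// file): one replication rule covering the whole bucket (empty prefix). The
+// role and bucket ARNs are placeholders, and the "Enabled" status string is
+// assumed from the ReplicationRuleStatus enum.
+//
+//   _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
+//       Bucket: aws.String("example-bucket"),
+//       ReplicationConfiguration: &s3.ReplicationConfiguration{
+//           Role: aws.String("arn:aws:iam::123456789012:role/example-replication-role"),
+//           Rules: []*s3.ReplicationRule{{
+//               Destination: &s3.Destination{Bucket: aws.String("arn:aws:s3:::example-destination")},
+//               Prefix:      aws.String(""),
+//               Status:      aws.String("Enabled"),
+//           }},
+//       },
+//   })
+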
+type PutBucketReplicationOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketReplicationOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketReplicationOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketRequestPaymentInput struct {
+ _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketRequestPaymentInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketRequestPaymentInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketRequestPaymentInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.RequestPaymentConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration"))
+ }
+ if s.RequestPaymentConfiguration != nil {
+ if err := s.RequestPaymentConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
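+// Illustrative usage sketch (editor's addition, not part of the generated
+// file): making requesters pay for downloads. svc and the bucket name are
+// placeholders; "Requester" is assumed to be one of the Payer enum values.
+//
+//   _, err := svc.PutBucketRequestPayment(&s3.PutBucketRequestPaymentInput{
+//       Bucket: aws.String("example-bucket"),
+//       RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{
+//           Payer: aws.String("Requester"),
+//       },
+//   })
+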
+type PutBucketRequestPaymentOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketRequestPaymentOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketRequestPaymentOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketTaggingInput struct {
+ _ struct{} `type:"structure" payload:"Tagging"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketTaggingInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketTaggingInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketTaggingInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Tagging == nil {
+ invalidParams.Add(request.NewErrParamRequired("Tagging"))
+ }
+ if s.Tagging != nil {
+ if err := s.Tagging.Validate(); err != nil {
+ invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
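+// Illustrative usage sketch (editor's addition, not part of the generated
+// file): replacing a bucket's tag set. svc and the names are placeholders;
+// each entry must pass Tag.Validate (Key at least 1 character, Value set).
+//
+//   _, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
+//       Bucket: aws.String("example-bucket"),
+//       Tagging: &s3.Tagging{
+//           TagSet: []*s3.Tag{{Key: aws.String("env"), Value: aws.String("dev")}},
+//       },
+//   })
+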
+type PutBucketTaggingOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketTaggingOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketTaggingOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketVersioningInput struct {
+ _ struct{} `type:"structure" payload:"VersioningConfiguration"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The concatenation of the authentication device's serial number, a space,
+ // and the value that is displayed on your authentication device.
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
+
+ VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketVersioningInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketVersioningInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketVersioningInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.VersioningConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
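+// Illustrative usage sketch (editor's addition, not part of the generated
+// file): enabling versioning using the BucketVersioningStatusEnabled constant
+// defined later in this file; svc and the bucket name are placeholders.
+//
+//   _, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//       Bucket: aws.String("example-bucket"),
+//       VersioningConfiguration: &s3.VersioningConfiguration{
+//           Status: aws.String(s3.BucketVersioningStatusEnabled),
+//       },
+//   })
+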
+type PutBucketVersioningOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketVersioningOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketVersioningOutput) GoString() string {
+ return s.String()
+}
+
+type PutBucketWebsiteInput struct {
+ _ struct{} `type:"structure" payload:"WebsiteConfiguration"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s PutBucketWebsiteInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketWebsiteInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutBucketWebsiteInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.WebsiteConfiguration == nil {
+ invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration"))
+ }
+ if s.WebsiteConfiguration != nil {
+ if err := s.WebsiteConfiguration.Validate(); err != nil {
+ invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
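+// Illustrative usage sketch (editor's addition, not part of the generated
+// file): serving a bucket as a static website. svc and the names are
+// placeholders; the IndexDocument type and its Suffix field are assumed from
+// this package's website configuration types.
+//
+//   _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+//       Bucket: aws.String("example-bucket"),
+//       WebsiteConfiguration: &s3.WebsiteConfiguration{
+//           IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+//       },
+//   })
+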
+type PutBucketWebsiteOutput struct {
+ _ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s PutBucketWebsiteOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutBucketWebsiteOutput) GoString() string {
+ return s.String()
+}
+
+type PutObjectAclInput struct {
+ _ struct{} `type:"structure" payload:"AccessControlPolicy"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Allows grantee the read, write, read ACP, and write ACP permissions on the
+ // bucket.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to list the objects in the bucket.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the bucket ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to create, overwrite, and delete any object in the bucket.
+ GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable bucket.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // VersionId used to reference a specific version of the object.
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectAclInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectAclInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectAclInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.AccessControlPolicy != nil {
+ if err := s.AccessControlPolicy.Validate(); err != nil {
+ invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
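+// Illustrative usage sketch (editor's addition, not part of the generated
+// file): applying a canned ACL to an existing object. svc and the names are
+// placeholders; "public-read" is assumed to be one of the ObjectCannedACL
+// enum values.
+//
+//   _, err := svc.PutObjectAcl(&s3.PutObjectAclInput{
+//       Bucket: aws.String("example-bucket"),
+//       Key:    aws.String("example-key"),
+//       ACL:    aws.String("public-read"),
+//   })
+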
+type PutObjectAclOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s PutObjectAclOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectAclOutput) GoString() string {
+ return s.String()
+}
+
+type PutObjectInput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ // The canned ACL to apply to the object.
+ ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+ // Object data.
+ Body io.ReadSeeker `type:"blob"`
+
+ // Name of the bucket to which the PUT operation was initiated.
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Specifies caching behavior along the request/reply chain.
+ CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+ // Specifies presentational information for the object.
+ ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+ // Specifies what content encodings have been applied to the object and thus
+ // what decoding mechanisms must be applied to obtain the media-type referenced
+ // by the Content-Type header field.
+ ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+ // The language the content is in.
+ ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+ // Size of the body in bytes. This parameter is useful when the size of the
+ // body cannot be determined automatically.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
+
+ // A standard MIME type describing the format of the object data.
+ ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+ // The date and time at which the object is no longer cacheable.
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Object key for which the PUT operation was initiated.
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
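+// Illustrative usage sketch (editor's addition, not part of the generated
+// file): a minimal PutObject call. svc and the names are placeholders;
+// bytes.NewReader satisfies the io.ReadSeeker required by Body.
+//
+//   svc := s3.New(session.New())
+//   out, err := svc.PutObject(&s3.PutObjectInput{
+//       Bucket: aws.String("example-bucket"),
+//       Key:    aws.String("example-key"),
+//       Body:   bytes.NewReader([]byte("object data")),
+//   })
+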
+type PutObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag for the uploaded object.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If the object expiration is configured, this will contain the expiration
+ // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.
+ Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // Version of the object.
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectOutput) GoString() string {
+ return s.String()
+}
+
+// Container for specifying a configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Queue Service (Amazon SQS) queue.
+type QueueConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Container for object key name filtering rules. For information about key
+ // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ Filter *NotificationConfigurationFilter `type:"structure"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects
+ // events of specified type.
+ QueueArn *string `locationName:"Queue" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s QueueConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *QueueConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"}
+ if s.Events == nil {
+ invalidParams.Add(request.NewErrParamRequired("Events"))
+ }
+ if s.QueueArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("QueueArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type QueueConfigurationDeprecated struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket event for which to send notifications.
+ Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ Queue *string `type:"string"`
+}
+
+// String returns the string representation
+func (s QueueConfigurationDeprecated) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s QueueConfigurationDeprecated) GoString() string {
+ return s.String()
+}
+
+type Redirect struct {
+ _ struct{} `type:"structure"`
+
+ // The host name to use in the redirect request.
+ HostName *string `type:"string"`
+
+ // The HTTP redirect code to use on the response. Not required if one of the
+ // siblings is present.
+ HttpRedirectCode *string `type:"string"`
+
+ // Protocol to use (http, https) when redirecting requests. The default is the
+ // protocol that is used in the original request.
+ Protocol *string `type:"string" enum:"Protocol"`
+
+ // The object key prefix to use in the redirect request. For example, to redirect
+ // requests for all pages with prefix docs/ (objects in the docs/ folder) to
+ // documents/, you can set a condition block with KeyPrefixEquals set to docs/
+ // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+ // if one of the siblings is present. Can be present only if ReplaceKeyWith
+ // is not provided.
+ ReplaceKeyPrefixWith *string `type:"string"`
+
+ // The specific object key to use in the redirect request. For example, redirect
+ // request to error.html. Not required if one of the siblings is present. Can
+ // be present only if ReplaceKeyPrefixWith is not provided.
+ ReplaceKeyWith *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Redirect) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Redirect) GoString() string {
+ return s.String()
+}
+
+type RedirectAllRequestsTo struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the host where requests will be redirected.
+ HostName *string `type:"string" required:"true"`
+
+ // Protocol to use (http, https) when redirecting requests. The default is the
+ // protocol that is used in the original request.
+ Protocol *string `type:"string" enum:"Protocol"`
+}
+
+// String returns the string representation
+func (s RedirectAllRequestsTo) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RedirectAllRequestsTo) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RedirectAllRequestsTo) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"}
+ if s.HostName == nil {
+ invalidParams.Add(request.NewErrParamRequired("HostName"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Container for replication rules. You can add as many as 1,000 rules. Total
+// replication configuration size can be up to 2 MB.
+type ReplicationConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating
+ // the objects.
+ Role *string `type:"string" required:"true"`
+
+ // Container for information about a particular replication rule. Replication
+ // configuration must have at least one rule and can contain up to 1,000 rules.
+ Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"`
+}
+
+// String returns the string representation
+func (s ReplicationConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"}
+ if s.Role == nil {
+ invalidParams.Add(request.NewErrParamRequired("Role"))
+ }
+ if s.Rules == nil {
+ invalidParams.Add(request.NewErrParamRequired("Rules"))
+ }
+ if s.Rules != nil {
+ for i, v := range s.Rules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type ReplicationRule struct {
+ _ struct{} `type:"structure"`
+
+ Destination *Destination `type:"structure" required:"true"`
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string `type:"string"`
+
+ // Object key name prefix identifying one or more objects to which the rule applies.
+ // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes
+ // are not supported.
+ Prefix *string `type:"string" required:"true"`
+
+ // The rule is ignored if status is not Enabled.
+ Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"`
+}
+
+// String returns the string representation
+func (s ReplicationRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ReplicationRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *ReplicationRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"}
+ if s.Destination == nil {
+ invalidParams.Add(request.NewErrParamRequired("Destination"))
+ }
+ if s.Prefix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Prefix"))
+ }
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+ if s.Destination != nil {
+ if err := s.Destination.Validate(); err != nil {
+ invalidParams.AddNested("Destination", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RequestPaymentConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies who pays for the download and request fees.
+ Payer *string `type:"string" required:"true" enum:"Payer"`
+}
+
+// String returns the string representation
+func (s RequestPaymentConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RequestPaymentConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RequestPaymentConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"}
+ if s.Payer == nil {
+ invalidParams.Add(request.NewErrParamRequired("Payer"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RestoreObjectInput struct {
+ _ struct{} `type:"structure" payload:"RestoreRequest"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"`
+
+ VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s RestoreObjectInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreObjectInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.RestoreRequest != nil {
+ if err := s.RestoreRequest.Validate(); err != nil {
+ invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
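+// Illustrative usage sketch (editor's addition, not part of the generated
+// file): restoring an archived (Glacier) object for seven days. svc and the
+// names are placeholders.
+//
+//   _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//       Bucket:         aws.String("example-bucket"),
+//       Key:            aws.String("archived-key"),
+//       RestoreRequest: &s3.RestoreRequest{Days: aws.Int64(7)},
+//   })
+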
+type RestoreObjectOutput struct {
+ _ struct{} `type:"structure"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s RestoreObjectOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectOutput) GoString() string {
+ return s.String()
+}
+
+type RestoreRequest struct {
+ _ struct{} `type:"structure"`
+
+ // Lifetime of the active copy in days
+ Days *int64 `type:"integer" required:"true"`
+}
+
+// String returns the string representation
+func (s RestoreRequest) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreRequest) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreRequest) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
+ if s.Days == nil {
+ invalidParams.Add(request.NewErrParamRequired("Days"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type RoutingRule struct {
+ _ struct{} `type:"structure"`
+
+ // A container for describing a condition that must be met for the specified
+ // redirect to apply. For example: 1. If a request is for pages in the /docs
+ // folder, redirect to the /documents folder. 2. If a request results in an
+ // HTTP 4xx error, redirect the request to another host where you might process
+ // the error.
+ Condition *Condition `type:"structure"`
+
+ // Container for redirect information. You can redirect requests to another
+ // host, to another page, or with another protocol. In the event of an error,
+ // you can specify a different error code to return.
+ Redirect *Redirect `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s RoutingRule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoutingRule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RoutingRule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "RoutingRule"}
+ if s.Redirect == nil {
+ invalidParams.Add(request.NewErrParamRequired("Redirect"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type Rule struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies the days since the initiation of an Incomplete Multipart Upload
+ // that Lifecycle will wait before permanently removing all parts of the upload.
+ AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+ Expiration *LifecycleExpiration `type:"structure"`
+
+ // Unique identifier for the rule. The value cannot be longer than 255 characters.
+ ID *string `type:"string"`
+
+ // Specifies when noncurrent object versions expire. Upon expiration, Amazon
+ // S3 permanently deletes the noncurrent object versions. You set this lifecycle
+ // configuration action on a bucket that has versioning enabled (or suspended)
+ // to request that Amazon S3 delete noncurrent object versions at a specific
+ // period in the object's lifetime.
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"`
+
+ // Container for the transition rule that describes when noncurrent objects
+ // transition to the STANDARD_IA or GLACIER storage class. If your bucket is
+ // versioning-enabled (or versioning is suspended), you can set this action
+ // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA
+ // or GLACIER storage class at a specific period in the object's lifetime.
+ NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"`
+
+ // Prefix identifying one or more objects to which the rule applies.
+ Prefix *string `type:"string" required:"true"`
+
+ // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule
+ // is not currently being applied.
+ Status *string `type:"string" required:"true" enum:"ExpirationStatus"`
+
+ Transition *Transition `type:"structure"`
+}
+
+// String returns the string representation
+func (s Rule) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Rule) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Rule) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Rule"}
+ if s.Prefix == nil {
+ invalidParams.Add(request.NewErrParamRequired("Prefix"))
+ }
+ if s.Status == nil {
+ invalidParams.Add(request.NewErrParamRequired("Status"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type Tag struct {
+ _ struct{} `type:"structure"`
+
+ // Name of the tag.
+ Key *string `min:"1" type:"string" required:"true"`
+
+ // Value of the tag.
+ Value *string `type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s Tag) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tag) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tag) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tag"}
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.Value == nil {
+ invalidParams.Add(request.NewErrParamRequired("Value"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type Tagging struct {
+ _ struct{} `type:"structure"`
+
+ TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"`
+}
+
+// String returns the string representation
+func (s Tagging) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Tagging) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *Tagging) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "Tagging"}
+ if s.TagSet == nil {
+ invalidParams.Add(request.NewErrParamRequired("TagSet"))
+ }
+ if s.TagSet != nil {
+ for i, v := range s.TagSet {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type TargetGrant struct {
+ _ struct{} `type:"structure"`
+
+ Grantee *Grantee `type:"structure"`
+
+ // Logging permissions assigned to the Grantee for the bucket.
+ Permission *string `type:"string" enum:"BucketLogsPermission"`
+}
+
+// String returns the string representation
+func (s TargetGrant) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TargetGrant) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TargetGrant) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TargetGrant"}
+ if s.Grantee != nil {
+ if err := s.Grantee.Validate(); err != nil {
+ invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams))
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+// Container for specifying the configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Notification Service (Amazon SNS) topic.
+type TopicConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"`
+
+ // Container for object key name filtering rules. For information about key
+ // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html)
+ // in the Amazon Simple Storage Service Developer Guide.
+ Filter *NotificationConfigurationFilter `type:"structure"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects
+ // events of specified type.
+ TopicArn *string `locationName:"Topic" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s TopicConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *TopicConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"}
+ if s.Events == nil {
+ invalidParams.Add(request.NewErrParamRequired("Events"))
+ }
+ if s.TopicArn == nil {
+ invalidParams.Add(request.NewErrParamRequired("TopicArn"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+type TopicConfigurationDeprecated struct {
+ _ struct{} `type:"structure"`
+
+ // Bucket event for which to send notifications.
+ Event *string `deprecated:"true" type:"string" enum:"Event"`
+
+ Events []*string `locationName:"Event" type:"list" flattened:"true"`
+
+ // Optional unique identifier for configurations in a notification configuration.
+ // If you don't provide one, Amazon S3 will assign an ID.
+ Id *string `type:"string"`
+
+ // Amazon SNS topic to which Amazon S3 will publish a message to report the
+ // specified events for the bucket.
+ Topic *string `type:"string"`
+}
+
+// String returns the string representation
+func (s TopicConfigurationDeprecated) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s TopicConfigurationDeprecated) GoString() string {
+ return s.String()
+}
+
+type Transition struct {
+ _ struct{} `type:"structure"`
+
+ // Indicates the date on which the object is to be moved or deleted. The date
+ // value should be in GMT ISO 8601 format.
+ Date *time.Time `type:"timestamp" timestampFormat:"iso8601"`
+
+ // Indicates the lifetime, in days, of the objects that are subject to the rule.
+ // The value must be a non-zero positive integer.
+ Days *int64 `type:"integer"`
+
+ // The class of storage used to store the object.
+ StorageClass *string `type:"string" enum:"TransitionStorageClass"`
+}
+
+// String returns the string representation
+func (s Transition) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Transition) GoString() string {
+ return s.String()
+}
+
+type UploadPartCopyInput struct {
+ _ struct{} `type:"structure"`
+
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // The name of the source bucket and key name of the source object, separated
+ // by a slash (/). Must be URL-encoded.
+ CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"`
+
+ // Copies the object if its entity tag (ETag) matches the specified tag.
+ CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"`
+
+ // Copies the object if it has been modified since the specified time.
+ CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Copies the object if its entity tag (ETag) is different than the specified
+ // ETag.
+ CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"`
+
+ // Copies the object if it hasn't been modified since the specified time.
+ CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"`
+
+ // The range of bytes to copy from the source object. The range value must use
+ // the form bytes=first-last, where the first and last are the zero-based byte
+ // offsets to copy. For example, bytes=0-9 indicates that you want to copy the
+ // first ten bytes of the source. You can copy a range only if the source object
+ // is greater than 5 GB.
+ CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"`
+
+ // Specifies the algorithm to use when decrypting the source object (e.g., AES256).
+ CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
+ // the source object. The encryption key provided in this header must be one
+ // that was used when the source object was created.
+ CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"`
+
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the part being copied. This is a positive integer between
+ // 1 and 10,000.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Upload ID identifying the multipart upload whose part is being copied.
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadPartCopyInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.CopySource == nil {
+ invalidParams.Add(request.NewErrParamRequired("CopySource"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.PartNumber == nil {
+ invalidParams.Add(request.NewErrParamRequired("PartNumber"))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
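+// Illustrative usage sketch (editor's addition, not part of the generated
+// file): copying the first part of a multipart upload from an existing
+// object. svc, the names, and uploadID (returned by CreateMultipartUpload)
+// are placeholders.
+//
+//   out, err := svc.UploadPartCopy(&s3.UploadPartCopyInput{
+//       Bucket:     aws.String("example-destination"),
+//       Key:        aws.String("destination-key"),
+//       CopySource: aws.String("example-source/source-key"), // URL-encoded bucket/key
+//       PartNumber: aws.Int64(1),
+//       UploadId:   aws.String(uploadID),
+//   })
+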
+type UploadPartCopyOutput struct {
+ _ struct{} `type:"structure" payload:"CopyPartResult"`
+
+ CopyPartResult *CopyPartResult `type:"structure"`
+
+ // The version of the source object that was copied, if you have enabled versioning
+ // on the source bucket.
+ CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation
+func (s UploadPartCopyOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartCopyOutput) GoString() string {
+ return s.String()
+}
+
+type UploadPartInput struct {
+ _ struct{} `type:"structure" payload:"Body"`
+
+ // Object data.
+ Body io.ReadSeeker `type:"blob"`
+
+ // Name of the bucket to which the multipart upload was initiated.
+ Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+ // Size of the body in bytes. This parameter is useful when the size of the
+ // body cannot be determined automatically.
+ ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"`
+
+ // Object key for which the multipart upload was initiated.
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // Part number of the part being uploaded. This is a positive integer between
+ // 1 and 10,000.
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header. This must be the same encryption key specified in the initiate multipart
+ // upload request.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Upload ID identifying the multipart upload whose part is being uploaded.
+ UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s UploadPartInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *UploadPartInput) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"}
+ if s.Bucket == nil {
+ invalidParams.Add(request.NewErrParamRequired("Bucket"))
+ }
+ if s.Key == nil {
+ invalidParams.Add(request.NewErrParamRequired("Key"))
+ }
+ if s.Key != nil && len(*s.Key) < 1 {
+ invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+ }
+ if s.PartNumber == nil {
+ invalidParams.Add(request.NewErrParamRequired("PartNumber"))
+ }
+ if s.UploadId == nil {
+ invalidParams.Add(request.NewErrParamRequired("UploadId"))
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
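+// Illustrative usage sketch (editor's addition, not part of the generated
+// file): uploading one part of a multipart upload. svc, the names, partData,
+// and uploadID (from CreateMultipartUpload) are placeholders; keep the
+// returned ETag for CompleteMultipartUpload.
+//
+//   out, err := svc.UploadPart(&s3.UploadPartInput{
+//       Bucket:     aws.String("example-bucket"),
+//       Key:        aws.String("example-key"),
+//       PartNumber: aws.Int64(1),
+//       UploadId:   aws.String(uploadID),
+//       Body:       bytes.NewReader(partData),
+//   })
+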
+type UploadPartOutput struct {
+ _ struct{} `type:"structure"`
+
+ // Entity tag for the uploaded object.
+ ETag *string `location:"header" locationName:"ETag" type:"string"`
+
+ // If present, indicates that the requester was successfully charged for the
+ // request.
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header confirming the encryption algorithm
+ // used.
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // If server-side encryption with a customer-provided encryption key was requested,
+ // the response will include this header to provide round trip message integrity
+ // verification of the customer-provided encryption key.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // If present, specifies the ID of the AWS Key Management Service (KMS) master
+ // encryption key that was used for the object.
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+}
+
+// String returns the string representation
+func (s UploadPartOutput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s UploadPartOutput) GoString() string {
+ return s.String()
+}
+
+type VersioningConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ // Specifies whether MFA delete is enabled in the bucket versioning configuration.
+ // This element is only returned if the bucket has been configured with MFA
+ // delete. If the bucket has never been so configured, this element is not returned.
+ MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"`
+
+ // The versioning state of the bucket.
+ Status *string `type:"string" enum:"BucketVersioningStatus"`
+}
+
+// String returns the string representation
+func (s VersioningConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s VersioningConfiguration) GoString() string {
+ return s.String()
+}
+
+type WebsiteConfiguration struct {
+ _ struct{} `type:"structure"`
+
+ ErrorDocument *ErrorDocument `type:"structure"`
+
+ IndexDocument *IndexDocument `type:"structure"`
+
+ RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"`
+
+ RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"`
+}
+
+// String returns the string representation
+func (s WebsiteConfiguration) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s WebsiteConfiguration) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *WebsiteConfiguration) Validate() error {
+ invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"}
+ if s.ErrorDocument != nil {
+ if err := s.ErrorDocument.Validate(); err != nil {
+ invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.IndexDocument != nil {
+ if err := s.IndexDocument.Validate(); err != nil {
+ invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.RedirectAllRequestsTo != nil {
+ if err := s.RedirectAllRequestsTo.Validate(); err != nil {
+ invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams))
+ }
+ }
+ if s.RoutingRules != nil {
+ for i, v := range s.RoutingRules {
+ if v == nil {
+ continue
+ }
+ if err := v.Validate(); err != nil {
+ invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams))
+ }
+ }
+ }
+
+ if invalidParams.Len() > 0 {
+ return invalidParams
+ }
+ return nil
+}
+
+const (
+ // @enum BucketAccelerateStatus
+ BucketAccelerateStatusEnabled = "Enabled"
+ // @enum BucketAccelerateStatus
+ BucketAccelerateStatusSuspended = "Suspended"
+)
+
+const (
+ // @enum BucketCannedACL
+ BucketCannedACLPrivate = "private"
+ // @enum BucketCannedACL
+ BucketCannedACLPublicRead = "public-read"
+ // @enum BucketCannedACL
+ BucketCannedACLPublicReadWrite = "public-read-write"
+ // @enum BucketCannedACL
+ BucketCannedACLAuthenticatedRead = "authenticated-read"
+)
+
+const (
+ // @enum BucketLocationConstraint
+ BucketLocationConstraintEu = "EU"
+ // @enum BucketLocationConstraint
+ BucketLocationConstraintEuWest1 = "eu-west-1"
+ // @enum BucketLocationConstraint
+ BucketLocationConstraintUsWest1 = "us-west-1"
+ // @enum BucketLocationConstraint
+ BucketLocationConstraintUsWest2 = "us-west-2"
+ // @enum BucketLocationConstraint
+ BucketLocationConstraintApSoutheast1 = "ap-southeast-1"
+ // @enum BucketLocationConstraint
+ BucketLocationConstraintApSoutheast2 = "ap-southeast-2"
+ // @enum BucketLocationConstraint
+ BucketLocationConstraintApNortheast1 = "ap-northeast-1"
+ // @enum BucketLocationConstraint
+ BucketLocationConstraintSaEast1 = "sa-east-1"
+ // @enum BucketLocationConstraint
+ BucketLocationConstraintCnNorth1 = "cn-north-1"
+ // @enum BucketLocationConstraint
+ BucketLocationConstraintEuCentral1 = "eu-central-1"
+)
+
+const (
+ // @enum BucketLogsPermission
+ BucketLogsPermissionFullControl = "FULL_CONTROL"
+ // @enum BucketLogsPermission
+ BucketLogsPermissionRead = "READ"
+ // @enum BucketLogsPermission
+ BucketLogsPermissionWrite = "WRITE"
+)
+
+const (
+ // @enum BucketVersioningStatus
+ BucketVersioningStatusEnabled = "Enabled"
+ // @enum BucketVersioningStatus
+ BucketVersioningStatusSuspended = "Suspended"
+)
+
+// Requests Amazon S3 to encode the object keys in the response and specifies
+// the encoding method to use. An object key may contain any Unicode character;
+// however, an XML 1.0 parser cannot parse some characters, such as characters
+// with an ASCII value from 0 to 10. For characters that are not supported in
+// XML 1.0, you can add this parameter to request that Amazon S3 encode the
+// keys in the response.
+const (
+ // @enum EncodingType
+ EncodingTypeUrl = "url"
+)
+
+// Bucket event for which to send notifications.
+const (
+ // @enum Event
+ EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject"
+ // @enum Event
+ EventS3ObjectCreated = "s3:ObjectCreated:*"
+ // @enum Event
+ EventS3ObjectCreatedPut = "s3:ObjectCreated:Put"
+ // @enum Event
+ EventS3ObjectCreatedPost = "s3:ObjectCreated:Post"
+ // @enum Event
+ EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy"
+ // @enum Event
+ EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload"
+ // @enum Event
+ EventS3ObjectRemoved = "s3:ObjectRemoved:*"
+ // @enum Event
+ EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete"
+ // @enum Event
+ EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated"
+)
+
+const (
+ // @enum ExpirationStatus
+ ExpirationStatusEnabled = "Enabled"
+ // @enum ExpirationStatus
+ ExpirationStatusDisabled = "Disabled"
+)
+
+const (
+ // @enum FilterRuleName
+ FilterRuleNamePrefix = "prefix"
+ // @enum FilterRuleName
+ FilterRuleNameSuffix = "suffix"
+)
+
+const (
+ // @enum MFADelete
+ MFADeleteEnabled = "Enabled"
+ // @enum MFADelete
+ MFADeleteDisabled = "Disabled"
+)
+
+const (
+ // @enum MFADeleteStatus
+ MFADeleteStatusEnabled = "Enabled"
+ // @enum MFADeleteStatus
+ MFADeleteStatusDisabled = "Disabled"
+)
+
+const (
+ // @enum MetadataDirective
+ MetadataDirectiveCopy = "COPY"
+ // @enum MetadataDirective
+ MetadataDirectiveReplace = "REPLACE"
+)
+
+const (
+ // @enum ObjectCannedACL
+ ObjectCannedACLPrivate = "private"
+ // @enum ObjectCannedACL
+ ObjectCannedACLPublicRead = "public-read"
+ // @enum ObjectCannedACL
+ ObjectCannedACLPublicReadWrite = "public-read-write"
+ // @enum ObjectCannedACL
+ ObjectCannedACLAuthenticatedRead = "authenticated-read"
+ // @enum ObjectCannedACL
+ ObjectCannedACLAwsExecRead = "aws-exec-read"
+ // @enum ObjectCannedACL
+ ObjectCannedACLBucketOwnerRead = "bucket-owner-read"
+ // @enum ObjectCannedACL
+ ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control"
+)
+
+const (
+ // @enum ObjectStorageClass
+ ObjectStorageClassStandard = "STANDARD"
+ // @enum ObjectStorageClass
+ ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
+ // @enum ObjectStorageClass
+ ObjectStorageClassGlacier = "GLACIER"
+)
+
+const (
+ // @enum ObjectVersionStorageClass
+ ObjectVersionStorageClassStandard = "STANDARD"
+)
+
+const (
+ // @enum Payer
+ PayerRequester = "Requester"
+ // @enum Payer
+ PayerBucketOwner = "BucketOwner"
+)
+
+const (
+ // @enum Permission
+ PermissionFullControl = "FULL_CONTROL"
+ // @enum Permission
+ PermissionWrite = "WRITE"
+ // @enum Permission
+ PermissionWriteAcp = "WRITE_ACP"
+ // @enum Permission
+ PermissionRead = "READ"
+ // @enum Permission
+ PermissionReadAcp = "READ_ACP"
+)
+
+const (
+ // @enum Protocol
+ ProtocolHttp = "http"
+ // @enum Protocol
+ ProtocolHttps = "https"
+)
+
+const (
+ // @enum ReplicationRuleStatus
+ ReplicationRuleStatusEnabled = "Enabled"
+ // @enum ReplicationRuleStatus
+ ReplicationRuleStatusDisabled = "Disabled"
+)
+
+const (
+ // @enum ReplicationStatus
+ ReplicationStatusComplete = "COMPLETE"
+ // @enum ReplicationStatus
+ ReplicationStatusPending = "PENDING"
+ // @enum ReplicationStatus
+ ReplicationStatusFailed = "FAILED"
+ // @enum ReplicationStatus
+ ReplicationStatusReplica = "REPLICA"
+)
+
+// If present, indicates that the requester was successfully charged for the
+// request.
+const (
+ // @enum RequestCharged
+ RequestChargedRequester = "requester"
+)
+
+// Confirms that the requester knows that she or he will be charged for the
+// request. Bucket owners need not specify this parameter in their requests.
+// Documentation on downloading objects from requester pays buckets can be found
+// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+const (
+ // @enum RequestPayer
+ RequestPayerRequester = "requester"
+)
+
+const (
+ // @enum ServerSideEncryption
+ ServerSideEncryptionAes256 = "AES256"
+ // @enum ServerSideEncryption
+ ServerSideEncryptionAwsKms = "aws:kms"
+)
+
+const (
+ // @enum StorageClass
+ StorageClassStandard = "STANDARD"
+ // @enum StorageClass
+ StorageClassReducedRedundancy = "REDUCED_REDUNDANCY"
+ // @enum StorageClass
+ StorageClassStandardIa = "STANDARD_IA"
+)
+
+const (
+ // @enum TransitionStorageClass
+ TransitionStorageClassGlacier = "GLACIER"
+ // @enum TransitionStorageClass
+ TransitionStorageClassStandardIa = "STANDARD_IA"
+)
+
+const (
+ // @enum Type
+ TypeCanonicalUser = "CanonicalUser"
+ // @enum Type
+ TypeAmazonCustomerByEmail = "AmazonCustomerByEmail"
+ // @enum Type
+ TypeGroup = "Group"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
new file mode 100644
index 0000000..c3a2702
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
@@ -0,0 +1,43 @@
+package s3
+
+import (
+ "io/ioutil"
+ "regexp"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)
+
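+// buildGetBucketLocation is the custom unmarshal handler for GetBucketLocation;
+// it extracts the LocationConstraint value from the raw XML response body with a
+// regular expression.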
+func buildGetBucketLocation(r *request.Request) {
+ if r.DataFilled() {
+ out := r.Data.(*GetBucketLocationOutput)
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "failed reading response body", err)
+ return
+ }
+
+ match := reBucketLocation.FindSubmatch(b)
+ if len(match) > 1 {
+ loc := string(match[1])
+ out.LocationConstraint = &loc
+ }
+ }
+}
+
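+// populateLocationConstraint defaults CreateBucketInput.CreateBucketConfiguration
+// to the client's configured region when creating a bucket anywhere other than
+// us-east-1.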
+func populateLocationConstraint(r *request.Request) {
+ if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" {
+ in := r.Params.(*CreateBucketInput)
+ if in.CreateBucketConfiguration == nil {
+ r.Params = awsutil.CopyOf(r.Params)
+ in = r.Params.(*CreateBucketInput)
+ in.CreateBucketConfiguration = &CreateBucketConfiguration{
+ LocationConstraint: r.Config.Region,
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
new file mode 100644
index 0000000..9fc5df9
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go
@@ -0,0 +1,36 @@
+package s3
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+ "io"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// contentMD5 computes and sets the HTTP Content-MD5 header for requests that
+// require it.
+func contentMD5(r *request.Request) {
+ h := md5.New()
+
+ // hash the body. seek back to the first position after reading to reset
+ // the body for transmission. copy errors may be assumed to be from the
+ // body.
+ _, err := io.Copy(h, r.Body)
+ if err != nil {
+ r.Error = awserr.New("ContentMD5", "failed to read body", err)
+ return
+ }
+ _, err = r.Body.Seek(0, 0)
+ if err != nil {
+ r.Error = awserr.New("ContentMD5", "failed to seek body", err)
+ return
+ }
+
+ // encode the md5 checksum in base64 and set the request header.
+ sum := h.Sum(nil)
+ sum64 := make([]byte, base64.StdEncoding.EncodedLen(len(sum)))
+ base64.StdEncoding.Encode(sum64, sum)
+ r.HTTPRequest.Header.Set("Content-MD5", string(sum64))
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
new file mode 100644
index 0000000..8463347
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -0,0 +1,46 @@
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+func init() {
+ initClient = defaultInitClientFn
+ initRequest = defaultInitRequestFn
+}
+
+func defaultInitClientFn(c *client.Client) {
+ // Support building custom endpoints based on config
+ c.Handlers.Build.PushFront(updateEndpointForS3Config)
+
+ // Require SSL when using SSE keys
+ c.Handlers.Validate.PushBack(validateSSERequiresSSL)
+ c.Handlers.Build.PushBack(computeSSEKeys)
+
+ // S3 uses custom error unmarshaling logic
+ c.Handlers.UnmarshalError.Clear()
+ c.Handlers.UnmarshalError.PushBack(unmarshalError)
+}
+
+func defaultInitRequestFn(r *request.Request) {
+ // Add request handlers for specific platforms.
+ // e.g. 100-continue support for PUT requests using Go 1.6
+ platformRequestHandlers(r)
+
+ switch r.Operation.Name {
+ case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy,
+ opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration,
+ opPutBucketReplication:
+ // These S3 operations require Content-MD5 to be set
+ r.Handlers.Build.PushBack(contentMD5)
+ case opGetBucketLocation:
+ // GetBucketLocation has custom parsing logic
+ r.Handlers.Unmarshal.PushFront(buildGetBucketLocation)
+ case opCreateBucket:
+ // Auto-populate LocationConstraint with current region
+ r.Handlers.Validate.PushFront(populateLocationConstraint)
+ case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload:
+ r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
new file mode 100644
index 0000000..5172929
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -0,0 +1,165 @@
+package s3
+
+import (
+ "fmt"
+ "net/url"
+ "regexp"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+// An operationBlacklist is a list of operation names with which a
+// request handler should not be executed.
+type operationBlacklist []string
+
+// Continue will return true if the Request's operation name is not
+// in the blacklist. False otherwise.
+func (b operationBlacklist) Continue(r *request.Request) bool {
+ for i := 0; i < len(b); i++ {
+ if b[i] == r.Operation.Name {
+ return false
+ }
+ }
+ return true
+}
+
+var accelerateOpBlacklist = operationBlacklist{
+ opListBuckets, opCreateBucket, opDeleteBucket,
+}
+
+// Request handler to automatically add the bucket name to the endpoint domain
+// if possible. This style of addressing is valid for all bucket names that are
+// DNS compatible and do not contain "."
+func updateEndpointForS3Config(r *request.Request) {
+ forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
+ accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
+
+ if accelerate && accelerateOpBlacklist.Continue(r) {
+ if forceHostStyle {
+ if r.Config.Logger != nil {
+ r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.")
+ }
+ }
+ updateEndpointForAccelerate(r)
+ } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation {
+ updateEndpointForHostStyle(r)
+ }
+}
+
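+// updateEndpointForHostStyle moves the bucket name from the request path into
+// the endpoint host when the bucket name is host compatible.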
+func updateEndpointForHostStyle(r *request.Request) {
+ bucket, ok := bucketNameFromReqParams(r.Params)
+ if !ok {
+ // Ignore the request if the bucket name was not provided;
+ // if this is an input validation error the validation handler
+ // will report it.
+ return
+ }
+
+ if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+ // bucket name must be valid to put into the host
+ return
+ }
+
+ moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
+
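+// updateEndpointForAccelerate rewrites the endpoint host to the s3-accelerate
+// form and moves the bucket name into the host, failing the request if the
+// bucket name is not host compatible.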
+func updateEndpointForAccelerate(r *request.Request) {
+ bucket, ok := bucketNameFromReqParams(r.Params)
+ if !ok {
+ // Ignore the request if the bucket name was not provided;
+ // if this is an input validation error the validation handler
+ // will report it.
+ return
+ }
+
+ if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) {
+ r.Error = awserr.New("InvalidParameterException",
+ fmt.Sprintf("bucket name %s is not compatibile with S3 Accelerate", bucket),
+ nil)
+ return
+ }
+
+ // Change endpoint from s3(-<region>)?.amazonaws.com to s3-accelerate.amazonaws.com
+ r.HTTPRequest.URL.Host = replaceHostRegion(r.HTTPRequest.URL.Host, "accelerate")
+ moveBucketToHost(r.HTTPRequest.URL, bucket)
+}
+
+// Attempts to retrieve the bucket name from the request input parameters.
+// If no bucket is found, or the field is empty "", false will be returned.
+func bucketNameFromReqParams(params interface{}) (string, bool) {
+ b, _ := awsutil.ValuesAtPath(params, "Bucket")
+ if len(b) == 0 {
+ return "", false
+ }
+
+ if bucket, ok := b[0].(*string); ok {
+ if bucketStr := aws.StringValue(bucket); bucketStr != "" {
+ return bucketStr, true
+ }
+ }
+
+ return "", false
+}
+
+// hostCompatibleBucketName returns true if the request should
+// put the bucket in the host. This is false if S3ForcePathStyle is
+// explicitly set or if the bucket is not DNS compatible.
+func hostCompatibleBucketName(u *url.URL, bucket string) bool {
+ // Bucket might be DNS compatible but dots in the hostname will fail
+ // certificate validation, so do not use host-style.
+ if u.Scheme == "https" && strings.Contains(bucket, ".") {
+ return false
+ }
+
+ // if the bucket is DNS compatible
+ return dnsCompatibleBucketName(bucket)
+}
+
+var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`)
+var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`)
+
+// dnsCompatibleBucketName returns true if the bucket name is DNS compatible.
+// Buckets created outside of the classic region MUST be DNS compatible.
+func dnsCompatibleBucketName(bucket string) bool {
+ return reDomain.MatchString(bucket) &&
+ !reIPAddress.MatchString(bucket) &&
+ !strings.Contains(bucket, "..")
+}
+
+// moveBucketToHost moves the bucket name from the URI path to URL host.
+func moveBucketToHost(u *url.URL, bucket string) {
+ u.Host = bucket + "." + u.Host
+ u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
+ if u.Path == "" {
+ u.Path = "/"
+ }
+}
+
+const s3HostPrefix = "s3"
+
+// replaceHostRegion replaces the S3 region string in the host with the
+// value provided. If v is empty the host prefix returned will be s3.
+func replaceHostRegion(host, v string) string {
+ if !strings.HasPrefix(host, s3HostPrefix) {
+ return host
+ }
+
+ suffix := host[len(s3HostPrefix):]
+ for i := len(s3HostPrefix); i < len(host); i++ {
+ if host[i] == '.' {
+ // Trim the region prefix up to the first '.', leaving the '.' in place.
+ suffix = host[i:]
+ break
+ }
+ }
+
+ if len(v) == 0 {
+ return fmt.Sprintf("s3%s", suffix)
+ }
+
+ return fmt.Sprintf("s3-%s%s", v, suffix)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
new file mode 100644
index 0000000..8e6f330
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go
@@ -0,0 +1,8 @@
+// +build !go1.6
+
+package s3
+
+import "github.com/aws/aws-sdk-go/aws/request"
+
+func platformRequestHandlers(r *request.Request) {
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
new file mode 100644
index 0000000..14d05f7
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go
@@ -0,0 +1,28 @@
+// +build go1.6
+
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+func platformRequestHandlers(r *request.Request) {
+ if r.Operation.HTTPMethod == "PUT" {
+ // 100-Continue should only be used on put requests.
+ r.Handlers.Sign.PushBack(add100Continue)
+ }
+}
+
+func add100Continue(r *request.Request) {
+ if aws.BoolValue(r.Config.S3Disable100Continue) {
+ return
+ }
+ if r.HTTPRequest.ContentLength < 1024*1024*2 {
+ // Ignore requests smaller than 2MB. This helps prevent delaying
+ // requests unnecessarily.
+ return
+ }
+
+ r.HTTPRequest.Header.Set("Expect", "100-Continue")
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
new file mode 100644
index 0000000..cf01da5
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -0,0 +1,86 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/client"
+ "github.com/aws/aws-sdk-go/aws/client/metadata"
+ "github.com/aws/aws-sdk-go/aws/request"
+ "github.com/aws/aws-sdk-go/private/protocol/restxml"
+ "github.com/aws/aws-sdk-go/private/signer/v4"
+)
+
+// S3 is a client for Amazon S3.
+// The service client's operations are safe to be used concurrently.
+// It is not safe to mutate any of the client's properties though.
+type S3 struct {
+ *client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// A ServiceName is the name of the service the client will make API calls to.
+const ServiceName = "s3"
+
+// New creates a new instance of the S3 client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+// // Create a S3 client from just a session.
+// svc := s3.New(mySession)
+//
+// // Create a S3 client with additional configuration
+// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 {
+ c := p.ClientConfig(ServiceName, cfgs...)
+ return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *S3 {
+ svc := &S3{
+ Client: client.New(
+ cfg,
+ metadata.ClientInfo{
+ ServiceName: ServiceName,
+ SigningRegion: signingRegion,
+ Endpoint: endpoint,
+ APIVersion: "2006-03-01",
+ },
+ handlers,
+ ),
+ }
+
+ // Handlers
+ svc.Handlers.Sign.PushBack(v4.Sign)
+ svc.Handlers.Build.PushBackNamed(restxml.BuildHandler)
+ svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler)
+ svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler)
+ svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler)
+
+ // Run custom client initialization if present
+ if initClient != nil {
+ initClient(svc.Client)
+ }
+
+ return svc
+}
+
+// newRequest creates a new request for an S3 operation and runs any
+// custom request initialization.
+func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request {
+ req := c.NewRequest(op, params, data)
+
+ // Run custom request initialization if present
+ if initRequest != nil {
+ initRequest(req)
+ }
+
+ return req
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
new file mode 100644
index 0000000..268ea2f
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
@@ -0,0 +1,44 @@
+package s3
+
+import (
+ "crypto/md5"
+ "encoding/base64"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
+
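+// validateSSERequiresSSL fails the request with errSSERequiresSSL when
+// customer-provided SSE keys are present on a request that is not using HTTPS.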
+func validateSSERequiresSSL(r *request.Request) {
+ if r.HTTPRequest.URL.Scheme != "https" {
+ p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey")
+ if len(p) > 0 {
+ r.Error = errSSERequiresSSL
+ }
+ }
+}
+
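+// computeSSEKeys base64-encodes the customer-provided SSE key headers and fills
+// in the matching -md5 headers when they were not supplied.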
+func computeSSEKeys(r *request.Request) {
+ headers := []string{
+ "x-amz-server-side-encryption-customer-key",
+ "x-amz-copy-source-server-side-encryption-customer-key",
+ }
+
+ for _, h := range headers {
+ md5h := h + "-md5"
+ if key := r.HTTPRequest.Header.Get(h); key != "" {
+ // Base64-encode the value
+ b64v := base64.StdEncoding.EncodeToString([]byte(key))
+ r.HTTPRequest.Header.Set(h, b64v)
+
+ // Add MD5 if it wasn't computed
+ if r.HTTPRequest.Header.Get(md5h) == "" {
+ sum := md5.Sum([]byte(key))
+ b64sum := base64.StdEncoding.EncodeToString(sum[:])
+ r.HTTPRequest.Header.Set(md5h, b64sum)
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
new file mode 100644
index 0000000..ce65fcd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go
@@ -0,0 +1,36 @@
+package s3
+
+import (
+ "bytes"
+ "io/ioutil"
+ "net/http"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
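+// copyMultipartStatusOKUnmarhsalError handles copy and multipart operations that
+// can return 200 OK with an error document in the body. It buffers the body and
+// runs the standard error unmarshaler on it; if the body is not an error document
+// the request is left successful, otherwise the status code is rewritten to 503
+// so the response is not treated as a success.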
+func copyMultipartStatusOKUnmarhsalError(r *request.Request) {
+ b, err := ioutil.ReadAll(r.HTTPResponse.Body)
+ if err != nil {
+ r.Error = awserr.New("SerializationError", "unable to read response body", err)
+ return
+ }
+ body := bytes.NewReader(b)
+ r.HTTPResponse.Body = aws.ReadSeekCloser(body)
+ defer r.HTTPResponse.Body.(aws.ReaderSeekerCloser).Seek(0, 0)
+
+ if body.Len() == 0 {
+ // If there is no body, don't attempt to parse it.
+ return
+ }
+
+ unmarshalError(r)
+ if err, ok := r.Error.(awserr.Error); ok && err != nil {
+ if err.Code() == "SerializationError" {
+ r.Error = nil
+ return
+ }
+ r.HTTPResponse.StatusCode = http.StatusServiceUnavailable
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
new file mode 100644
index 0000000..59e4950
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -0,0 +1,59 @@
+package s3
+
+import (
+ "encoding/xml"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strings"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+type xmlErrorResponse struct {
+ XMLName xml.Name `xml:"Error"`
+ Code string `xml:"Code"`
+ Message string `xml:"Message"`
+}
+
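+// unmarshalError maps an S3 error response onto r.Error as an awserr.RequestFailure,
+// special-casing 301 redirects (bucket in a different region) and responses with
+// an empty body.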
+func unmarshalError(r *request.Request) {
+ defer r.HTTPResponse.Body.Close()
+ defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+
+ if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New("BucketRegionError",
+ fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
+ aws.StringValue(r.Config.Region)),
+ nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ if r.HTTPResponse.ContentLength == 0 {
+ // No body, use status code to generate an awserr.Error
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ return
+ }
+
+ resp := &xmlErrorResponse{}
+ err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp)
+ if err != nil && err != io.EOF {
+ r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil)
+ } else {
+ r.Error = awserr.NewRequestFailure(
+ awserr.New(resp.Code, resp.Message, nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ )
+ }
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
new file mode 100644
index 0000000..cbd3d31
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -0,0 +1,123 @@
+// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+
+package s3
+
+import (
+ "github.com/aws/aws-sdk-go/private/waiter"
+)
+
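+// WaitUntilBucketExists polls HeadBucket every 5 seconds, up to 20 attempts,
+// treating status 200, 301, or 403 as success and retrying on 404.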
+func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
+ waiterCfg := waiter.Config{
+ Operation: "HeadBucket",
+ Delay: 5,
+ MaxAttempts: 20,
+ Acceptors: []waiter.WaitAcceptor{
+ {
+ State: "success",
+ Matcher: "status",
+ Argument: "",
+ Expected: 200,
+ },
+ {
+ State: "success",
+ Matcher: "status",
+ Argument: "",
+ Expected: 301,
+ },
+ {
+ State: "success",
+ Matcher: "status",
+ Argument: "",
+ Expected: 403,
+ },
+ {
+ State: "retry",
+ Matcher: "status",
+ Argument: "",
+ Expected: 404,
+ },
+ },
+ }
+
+ w := waiter.Waiter{
+ Client: c,
+ Input: input,
+ Config: waiterCfg,
+ }
+ return w.Wait()
+}
+
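+// WaitUntilBucketNotExists polls HeadBucket with the same cadence until it
+// returns 404.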
+func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
+ waiterCfg := waiter.Config{
+ Operation: "HeadBucket",
+ Delay: 5,
+ MaxAttempts: 20,
+ Acceptors: []waiter.WaitAcceptor{
+ {
+ State: "success",
+ Matcher: "status",
+ Argument: "",
+ Expected: 404,
+ },
+ },
+ }
+
+ w := waiter.Waiter{
+ Client: c,
+ Input: input,
+ Config: waiterCfg,
+ }
+ return w.Wait()
+}
+
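+// WaitUntilObjectExists polls HeadObject every 5 seconds, up to 20 attempts,
+// retrying on 404 until the object returns a 200.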
+func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
+ waiterCfg := waiter.Config{
+ Operation: "HeadObject",
+ Delay: 5,
+ MaxAttempts: 20,
+ Acceptors: []waiter.WaitAcceptor{
+ {
+ State: "success",
+ Matcher: "status",
+ Argument: "",
+ Expected: 200,
+ },
+ {
+ State: "retry",
+ Matcher: "status",
+ Argument: "",
+ Expected: 404,
+ },
+ },
+ }
+
+ w := waiter.Waiter{
+ Client: c,
+ Input: input,
+ Config: waiterCfg,
+ }
+ return w.Wait()
+}
+
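+// WaitUntilObjectNotExists polls HeadObject with the same cadence until it
+// returns 404.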
+func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
+ waiterCfg := waiter.Config{
+ Operation: "HeadObject",
+ Delay: 5,
+ MaxAttempts: 20,
+ Acceptors: []waiter.WaitAcceptor{
+ {
+ State: "success",
+ Matcher: "status",
+ Argument: "",
+ Expected: 404,
+ },
+ },
+ }
+
+ w := waiter.Waiter{
+ Client: c,
+ Input: input,
+ Config: waiterCfg,
+ }
+ return w.Wait()
+}
diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
new file mode 100644
index 0000000..1272038
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README.md
@@ -0,0 +1,560 @@
+ini [![Build Status](https://drone.io/github.com/go-ini/ini/status.png)](https://drone.io/github.com/go-ini/ini/latest) [![](http://gocover.io/_badge/github.com/go-ini/ini)](http://gocover.io/github.com/go-ini/ini)
+===
+
+![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200)
+
+Package ini provides INI file read and write functionality in Go.
+
+[简体中文](README_ZH.md)
+
+## Feature
+
+- Load multiple data sources (`[]byte` or file) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+ go get gopkg.in/ini.v1
+
+## Getting Started
+
+### Loading from data sources
+
+A **Data Source** is either raw data of type `[]byte` or a file name of type `string`, and you can load **as many** data sources as you want. Passing any other type simply returns an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename")
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide how many data sources to load at the beginning, you are still able to **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+### Working with sections
+
+To get a section, you would need to:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+As a shortcut for the default section, just give an empty string as the name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you're pretty sure the section exists, the following code can make your life easier:
+
+```go
+section := cfg.Section("")
+```
+
+What happens when the section somehow does not exist? Don't panic; it automatically creates and returns a new section to you.
+
+To create a new section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get a list of sections or section names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+The same rule applies to key operations:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get a list of keys or key names:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a cloned hash of keys and their corresponding values:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### Working with values
+
+To get a string value:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To validate a key's value on the fly:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+ if len(in) == 0 {
+ return "default"
+ }
+ return in
+})
+```
+
+To get typed values:
+
+```go
+// For boolean values:
+// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
+// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods starting with Must also accept one argument as the default value
+// to use when the key is not found or its value fails to parse to the given type.
+// The exception is MustString, which always requires a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+What if my value is three lines long?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Not a problem!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------ end --- */
+```
+
+That's cool, how about continuation lines?
+
+```ini
+[advance]
+two_lines = how about \
+ continuation lines?
+lots_of_lines = 1 \
+ 2 \
+ 3 \
+ 4
+```
+
+Piece of cake!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+Note that single and double quotes around values will be stripped:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+That's all? Hmm, no.
+
+#### Helper methods of working with values
+
+To get a value from a set of given candidates:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+The default value is returned if the key's value is not among the candidates you give; the default value does not need to be one of the candidates.
+
+To validate that a value is in a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+To auto-split a value into a slice:
+
+```go
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+### Save your configuration
+
+Finally, it's time to save your configuration somewhere.
+
+A typical way to save the configuration is to write it to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+Another way to save is to write to an `io.Writer`:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+## Advanced Usage
+
+### Recursive Values
+
+Every key's value supports the special syntax `%(<name>)s`, where `<name>` is a key name in the same section or in the default section; `%(<name>)s` is replaced by the corresponding value (or an empty string if the key is not found). You can nest this syntax up to 99 levels deep.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+### Parent-child Sections
+
+You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library looks for it in the parent section, and so on, until there is no parent section left.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+### Auto-increment Key Names
+
+If a key name is `-` in the data source, it is treated as the special syntax for auto-increment key names, starting from 1; each section keeps its own independent counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### Map To Struct
+
+Want a more object-oriented way to play with INI? Cool.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+ Content string
+ Cities []string
+}
+
+type Person struct {
+ Name string
+ Age int `ini:"age"`
+ Male bool
+ Born time.Time
+ Note
+ Created time.Time `ini:"-"`
+}
+
+func main() {
+ cfg, err := ini.Load("path/to/ini")
+ // ...
+ p := new(Person)
+ err = cfg.MapTo(p)
+ // ...
+
+ // Things can be simpler.
+ err = ini.MapTo(p, "path/to/ini")
+ // ...
+
+ // Just map a section? Fine.
+ n := new(Note)
+ err = cfg.Section("Note").MapTo(n)
+ // ...
+}
+```
+
+Can I have a default value for a field? Absolutely.
+
+Assign it before you map to the struct; the field keeps its value if the key is not present or has the wrong type.
+
+```go
+// ...
+p := &Person{
+ Name: "Joe",
+}
+// ...
+```
+
+That's really cool, but what's the point if I can't get my file back from the struct?
+
+### Reflect From Struct
+
+Why not?
+
+```go
+type Embeded struct {
+ Dates []time.Time `delim:"|"`
+ Places []string
+ None []int
+}
+
+type Author struct {
+ Name string `ini:"NAME"`
+ Male bool
+ Age int
+ GPA float64
+ NeverMind string `ini:"-"`
+ *Embeded
+}
+
+func main() {
+ a := &Author{"Unknwon", true, 21, 2.8, "",
+ &Embeded{
+ []time.Time{time.Now(), time.Now()},
+ []string{"HangZhou", "Boston"},
+ []int{},
+ }}
+ cfg := ini.Empty()
+ err := ini.ReflectFrom(cfg, a)
+ // ...
+}
+```
+
+So, what do I get?
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+Places = HangZhou,Boston
+None =
+```
+
+#### Name Mapper
+
+To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.
+
+There are 2 built-in name mappers:
+
+- `AllCapsUnderscore`: converts the field name to `ALL_CAPS_UNDERSCORE` format before matching the section or key.
+- `TitleUnderscore`: converts the field name to `title_underscore` format before matching the section or key.
+
+To use them:
+
+```go
+type Info struct {
+ PackageName string
+}
+
+func main() {
+ err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini"))
+ // ...
+
+ cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+ // ...
+ info := new(Info)
+ cfg.NameMapper = ini.AllCapsUnderscore
+ err = cfg.MapTo(info)
+ // ...
+}
+```
+
+The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
+
+#### Other Notes On Map/Reflect
+
+Any embedded struct is treated as its own section by default, and there are no automatic parent-child relations in the map/reflect feature:
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+What if (yes, I'm paranoid) I want the embedded struct to be in the same section? Well, all roads lead to Rome.
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child `ini:"Parent"`
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting Help
+
+- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File An Issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQs
+
+### What does `BlockMode` field do?
+
+By default, the library lets you both read and write values, so it needs a lock to keep your data safe. But if you are certain you will only read data through the library, you can set `cfg.BlockMode = false` to make read operations about **50-70%** faster.
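+
+A minimal sketch of the read-only pattern, in the same snippet style as the examples above (the file name, section, and key here are made up for illustration):
+
+```go
+cfg, err := ini.Load("app.ini")
+if err != nil {
+ // handle error
+}
+// Safe only because nothing writes to cfg after this point.
+cfg.BlockMode = false
+timeout := cfg.Section("server").Key("TIMEOUT").MustInt(30)
+```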
+
+### Why another INI library?
+
+Many people are using my other INI library, [goconfig](https://github.com/Unknwon/goconfig); the reason for this one is that I wanted to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
+
+Making those changes meant breaking the API, so it was safer to start fresh in a new place and use `gopkg.in` to version the package from the beginning. (PS: the import path is shorter, too.)
+
+## License
+
+This project is under the Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md
new file mode 100644
index 0000000..45e19ed
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/README_ZH.md
@@ -0,0 +1,547 @@
+This package provides INI file read and write functionality for Go.
+
+## Features
+
+- Load multiple data sources (`[]byte` or file) with overwrites.
+- Read with recursive values.
+- Read with parent-child sections.
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Convert values directly to Go types while reading.
+- Read and **write** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Sections and keys keep their original order when the file is saved.
+
+## Installation
+
+ go get gopkg.in/ini.v1
+
+## Getting Started
+
+### Loading from data sources
+
+A **data source** is either raw data of type `[]byte` or a file path of type `string`. You can load **as many** data sources as you want. Passing any other type returns an error directly.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename")
+```
+
+Or start from an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide at the beginning which data sources to load, you can still use **Append()** to load them when needed.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+### Working with sections
+
+To get a specific section:
+
+```go
+section, err := cfg.GetSection("section name")
+```
+
+If you want the default section, use an empty string in place of the section name:
+
+```go
+section, err := cfg.GetSection("")
+```
+
+When you are certain a section exists, you can use this shortcut:
+
+```go
+section := cfg.Section("")
+```
+
+What if you guessed wrong and the section you asked for does not actually exist? No worries; it automatically creates and returns the corresponding section object for you.
+
+To create a section:
+
+```go
+err := cfg.NewSection("new section")
+```
+
+To get all section objects or names:
+
+```go
+sections := cfg.Sections()
+names := cfg.SectionStrings()
+```
+
+### Working with keys
+
+To get a key under a section:
+
+```go
+key, err := cfg.Section("").GetKey("key name")
+```
+
+As with sections, you can also get a key directly and skip the error handling:
+
+```go
+key := cfg.Section("").Key("key name")
+```
+
+To create a new key:
+
+```go
+err := cfg.Section("").NewKey("name", "value")
+```
+
+To get all keys or key names under a section:
+
+```go
+keys := cfg.Section("").Keys()
+names := cfg.Section("").KeyStrings()
+```
+
+To get a cloned hash of all key-value pairs under a section:
+
+```go
+hash := cfg.Section("").KeysHash()
+```
+
+### Working with values
+
+To get a string value:
+
+```go
+val := cfg.Section("").Key("key name").String()
+```
+
+To process and validate a value with a custom function as you get it:
+
+```go
+val := cfg.Section("").Key("key name").Validate(func(in string) string {
+ if len(in) == 0 {
+ return "default"
+ }
+ return in
+})
+```
+
+To get values of other types:
+
+```go
+// Rules for boolean values:
+// true when the value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
+// false when the value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
+v, err = cfg.Section("").Key("BOOL").Bool()
+v, err = cfg.Section("").Key("FLOAT64").Float64()
+v, err = cfg.Section("").Key("INT").Int()
+v, err = cfg.Section("").Key("INT64").Int64()
+v, err = cfg.Section("").Key("UINT").Uint()
+v, err = cfg.Section("").Key("UINT64").Uint64()
+v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
+v, err = cfg.Section("").Key("TIME").Time() // RFC3339
+
+v = cfg.Section("").Key("BOOL").MustBool()
+v = cfg.Section("").Key("FLOAT64").MustFloat64()
+v = cfg.Section("").Key("INT").MustInt()
+v = cfg.Section("").Key("INT64").MustInt64()
+v = cfg.Section("").Key("UINT").MustUint()
+v = cfg.Section("").Key("UINT64").MustUint64()
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
+v = cfg.Section("").Key("TIME").MustTime() // RFC3339
+
+// Methods starting with Must accept one argument of the same type as the default value;
+// when the key does not exist or the conversion fails, that default value is returned directly.
+// However, the MustString method always requires a default value.
+
+v = cfg.Section("").Key("String").MustString("default")
+v = cfg.Section("").Key("BOOL").MustBool(true)
+v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
+v = cfg.Section("").Key("INT").MustInt(10)
+v = cfg.Section("").Key("INT64").MustInt64(99)
+v = cfg.Section("").Key("UINT").MustUint(3)
+v = cfg.Section("").Key("UINT64").MustUint64(6)
+v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
+v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
+```
+
+What if my value spans multiple lines?
+
+```ini
+[advance]
+ADDRESS = """404 road,
+NotFound, State, 5000
+Earth"""
+```
+
+Hmm? Not a problem!
+
+```go
+cfg.Section("advance").Key("ADDRESS").String()
+
+/* --- start ---
+404 road,
+NotFound, State, 5000
+Earth
+------ end --- */
+```
+
+Awesome! And what if a value is too long for one line and I want to continue it on the next?
+
+```ini
+[advance]
+two_lines = how about \
+ continuation lines?
+lots_of_lines = 1 \
+ 2 \
+ 3 \
+ 4
+```
+
+Piece of cake!
+
+```go
+cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
+cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
+```
+
+Note that single and double quotes around values are stripped automatically:
+
+```ini
+foo = "some value" // foo: some value
+bar = 'some value' // bar: some value
+```
+
+Is that all? Ha, of course not.
+
+#### Helper methods for working with values
+
+Restrict a value to a set of candidates while fetching it:
+
+```go
+v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
+v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
+v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
+v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
+v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
+v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
+v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
+v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
+```
+
+If the fetched value is not one of the candidates, the default value is returned instead; the default value does not have to be one of the candidates.
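+
+For instance, a quick sketch (the key `SPEED` and its raw value `fast` are made up):
+
+```go
+// "fast" matches none of the candidates, so the default "normal" is
+// returned, even though "normal" itself is not a candidate.
+v = cfg.Section("").Key("SPEED").In("normal", []string{"slow", "medium"})
+```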
+
+Check that the fetched value falls within a given range:
+
+```go
+vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
+vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
+vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
+vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
+vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
+vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
+vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
+```
+
+Automatically split a value into a slice:
+
+```go
+vals = cfg.Section("").Key("STRINGS").Strings(",")
+vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
+vals = cfg.Section("").Key("INTS").Ints(",")
+vals = cfg.Section("").Key("INT64S").Int64s(",")
+vals = cfg.Section("").Key("UINTS").Uints(",")
+vals = cfg.Section("").Key("UINT64S").Uint64s(",")
+vals = cfg.Section("").Key("TIMES").Times(",")
+```
+
+### Saving your configuration
+
+Finally, it's time to save your configuration.
+
+The most basic approach is to write it out to a file:
+
+```go
+// ...
+err = cfg.SaveTo("my.ini")
+err = cfg.SaveToIndent("my.ini", "\t")
+```
+
+A more advanced approach is to write to anything that implements the `io.Writer` interface:
+
+```go
+// ...
+cfg.WriteTo(writer)
+cfg.WriteToIndent(writer, "\t")
+```
+
+### Advanced usage
+
+#### Recursive values
+
+While fetching any value, the special syntax `%(<name>)s` is expanded, where `<name>` is the name of a key in the same section or in the default section. The string `%(<name>)s` is replaced by the corresponding key's value; if that key does not exist, it is replaced by an empty string. Recursive substitution is allowed up to 99 levels deep.
+
+```ini
+NAME = ini
+
+[author]
+NAME = Unknwon
+GITHUB = https://github.com/%(NAME)s
+
+[package]
+FULL_NAME = github.com/go-ini/%(NAME)s
+```
+
+```go
+cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
+cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
+```
+
+#### Parent-child sections
+
+Use `.` in a section name to express a parent-child relationship between two or more sections. If a key does not exist in a child section, the lookup continues in its parent sections until there is no parent left.
+
+```ini
+NAME = ini
+VERSION = v1
+IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
+
+[package]
+CLONE_URL = https://%(IMPORT_PATH)s
+
+[package.sub]
+```
+
+```go
+cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
+```
+
+#### Auto-increment key names
+
+If a key name in the data source is `-`, it is treated with the special auto-increment syntax. The counter starts at 1, and each section keeps its own independent counter.
+
+```ini
+[features]
+-: Support read/write comments of keys and sections
+-: Support auto-increment of key names
+-: Support load multiple files to overwrite key values
+```
+
+```go
+cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
+```
+
+### Mapping to a struct
+
+Want to work with INI in a more object-oriented way? Good idea.
+
+```ini
+Name = Unknwon
+age = 21
+Male = true
+Born = 1993-01-01T20:17:05Z
+
+[Note]
+Content = Hi is a good man!
+Cities = HangZhou, Boston
+```
+
+```go
+type Note struct {
+ Content string
+ Cities []string
+}
+
+type Person struct {
+ Name string
+ Age int `ini:"age"`
+ Male bool
+ Born time.Time
+ Note
+ Created time.Time `ini:"-"`
+}
+
+func main() {
+ cfg, err := ini.Load("path/to/ini")
+ // ...
+ p := new(Person)
+ err = cfg.MapTo(p)
+ // ...
+
+	// It can hardly get any simpler than this.
+ err = ini.MapTo(p, "path/to/ini")
+ // ...
+
+	// Hmm? Only need to map one section?
+ n := new(Note)
+ err = cfg.Section("Note").MapTo(n)
+ // ...
+}
+```
+
+How do you set default values for struct fields? It's simple: just assign the desired value to the field before mapping. If the key is not found, or the type conversion fails, the field's value is left unchanged.
+
+```go
+// ...
+p := &Person{
+ Name: "Joe",
+}
+// ...
+```
+
+Working with INI like this is really cool! But what good is it if I can't get my original configuration file back?
+
+### Reflecting from a struct
+
+Well, did I ever say you couldn't?
+
+```go
+type Embeded struct {
+ Dates []time.Time `delim:"|"`
+ Places []string
+ None []int
+}
+
+type Author struct {
+ Name string `ini:"NAME"`
+ Male bool
+ Age int
+ GPA float64
+ NeverMind string `ini:"-"`
+ *Embeded
+}
+
+func main() {
+ a := &Author{"Unknwon", true, 21, 2.8, "",
+ &Embeded{
+ []time.Time{time.Now(), time.Now()},
+ []string{"HangZhou", "Boston"},
+ []int{},
+ }}
+ cfg := ini.Empty()
+	err := ini.ReflectFrom(cfg, a)
+ // ...
+}
+```
+
+Look, the magic happened:
+
+```ini
+NAME = Unknwon
+Male = true
+Age = 21
+GPA = 2.8
+
+[Embeded]
+Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
+Places = HangZhou,Boston
+None =
+```
+
+#### Name mapper
+
+To save you time and simplify your code, this library supports a name mapper of type [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper), which is responsible for mapping struct field names to section and key names.
+
+There are currently two built-in name mappers:
+
+- `AllCapsUnderscore`: converts field names to the `ALL_CAPS_UNDERSCORE` format before matching section and key names.
+- `TitleUnderscore`: converts field names to the `title_underscore` format before matching section and key names.
+
+Usage:
+
+```go
+type Info struct{
+ PackageName string
+}
+
+func main() {
+	err := ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
+ // ...
+
+ cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
+ // ...
+ info := new(Info)
+ cfg.NameMapper = ini.AllCapsUnderscore
+ err = cfg.MapTo(info)
+ // ...
+}
+```
+
+The same rules apply when using the `ini.ReflectFromWithMapper` function.
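+
+A minimal sketch of that, reusing the `Info` struct from above (the field value is made up):
+
+```go
+cfg := ini.Empty()
+// With TitleUnderscore, PackageName is written out as the key "package_name".
+err := ini.ReflectFromWithMapper(cfg, &Info{PackageName: "ini"}, ini.TitleUnderscore)
+// ...
+```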
+
+#### Other notes on mapping and reflecting
+
+Any embedded struct is treated as a separate section by default, and no parent-child section relationship is created automatically:
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration file:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+
+[Child]
+Age = 21
+```
+
+Great, but what if I want the embedded struct to live in the same section anyway? Fine, have it your way!
+
+```go
+type Child struct {
+ Age string
+}
+
+type Parent struct {
+ Name string
+ Child `ini:"Parent"`
+}
+
+type Config struct {
+ City string
+ Parent
+}
+```
+
+Example configuration file:
+
+```ini
+City = Boston
+
+[Parent]
+Name = Unknwon
+Age = 21
+```
+
+## Getting help
+
+- [API documentation](https://gowalker.org/gopkg.in/ini.v1)
+- [File an issue](https://github.com/go-ini/ini/issues/new)
+
+## FAQ
+
+### What is the `BlockMode` field?
+
+By default, this library uses locks to keep your data safe during reads and writes. In some cases, however, you are certain you will only ever perform reads. Then, setting `cfg.BlockMode = false` speeds up read operations by roughly **50-70%**.
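+
+A minimal sketch (the file name is made up; all loading must happen before the switch):
+
+```go
+cfg, err := ini.Load("app.ini")
+// ...
+cfg.BlockMode = false // read-only from here on: skips locking on reads
+val := cfg.Section("").Key("key name").String()
+```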
+
+### Why write another INI parsing library?
+
+Many people use my [goconfig](https://github.com/Unknwon/goconfig) to work with INI files, but I wanted code in a more idiomatic Go style. Besides, setting `cfg.BlockMode = false` gives you a performance boost of roughly **10-30%**.
+
+To make these changes I had to break the API, so starting a new repository was the safest approach. On top of that, this library is versioned directly through `gopkg.in`. (The real reason is that the import path got shorter.)
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
new file mode 100644
index 0000000..1fee789
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -0,0 +1,1226 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+// Package ini provides INI file read and write functionality in Go.
+package ini
+
+import (
+ "bufio"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+)
+
+const (
+ DEFAULT_SECTION = "DEFAULT"
+	// Maximum allowed depth when recursively substituting variable names.
+ _DEPTH_VALUES = 99
+
+ _VERSION = "1.6.0"
+)
+
+func Version() string {
+ return _VERSION
+}
+
+var (
+ LineBreak = "\n"
+
+ // Variable regexp pattern: %(variable)s
+ varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`)
+
+ // Write spaces around "=" to look better.
+ PrettyFormat = true
+)
+
+func init() {
+ if runtime.GOOS == "windows" {
+ LineBreak = "\r\n"
+ }
+}
+
+func inSlice(str string, s []string) bool {
+ for _, v := range s {
+ if str == v {
+ return true
+ }
+ }
+ return false
+}
+
+// dataSource is an interface that returns file content.
+type dataSource interface {
+ ReadCloser() (io.ReadCloser, error)
+}
+
+type sourceFile struct {
+ name string
+}
+
+func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) {
+ return os.Open(s.name)
+}
+
+type bytesReadCloser struct {
+ reader io.Reader
+}
+
+func (rc *bytesReadCloser) Read(p []byte) (n int, err error) {
+ return rc.reader.Read(p)
+}
+
+func (rc *bytesReadCloser) Close() error {
+ return nil
+}
+
+type sourceData struct {
+ data []byte
+}
+
+func (s *sourceData) ReadCloser() (io.ReadCloser, error) {
+ return &bytesReadCloser{bytes.NewReader(s.data)}, nil
+}
+
+// ____ __.
+// | |/ _|____ ___.__.
+// | <_/ __ < | |
+// | | \ ___/\___ |
+// |____|__ \___ > ____|
+// \/ \/\/
+
+// Key represents a key under a section.
+type Key struct {
+ s *Section
+ Comment string
+ name string
+ value string
+ isAutoIncr bool
+}
+
+// Name returns name of key.
+func (k *Key) Name() string {
+ return k.name
+}
+
+// Value returns the raw value of the key, for performance purposes.
+func (k *Key) Value() string {
+ return k.value
+}
+
+// String returns string representation of value.
+func (k *Key) String() string {
+ val := k.value
+ if strings.Index(val, "%") == -1 {
+ return val
+ }
+
+ for i := 0; i < _DEPTH_VALUES; i++ {
+ vr := varPattern.FindString(val)
+ if len(vr) == 0 {
+ break
+ }
+
+ // Take off leading '%(' and trailing ')s'.
+ noption := strings.TrimLeft(vr, "%(")
+ noption = strings.TrimRight(noption, ")s")
+
+ // Search in the same section.
+ nk, err := k.s.GetKey(noption)
+ if err != nil {
+ // Search again in default section.
+ nk, _ = k.s.f.Section("").GetKey(noption)
+ }
+
+		// Substitute the variable with its value.
+ val = strings.Replace(val, vr, nk.value, -1)
+ }
+ return val
+}
+
+// Validate accepts a validation function which can
+// return a modified result as the key value.
+func (k *Key) Validate(fn func(string) string) string {
+ return fn(k.String())
+}
+
+// parseBool returns the boolean value represented by the string.
+//
+// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On,
+// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off.
+// Any other value returns an error.
+func parseBool(str string) (value bool, err error) {
+ switch str {
+ case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On":
+ return true, nil
+ case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off":
+ return false, nil
+ }
+ return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
+}
+
+// Bool returns bool type value.
+func (k *Key) Bool() (bool, error) {
+ return parseBool(k.String())
+}
+
+// Float64 returns float64 type value.
+func (k *Key) Float64() (float64, error) {
+ return strconv.ParseFloat(k.String(), 64)
+}
+
+// Int returns int type value.
+func (k *Key) Int() (int, error) {
+ return strconv.Atoi(k.String())
+}
+
+// Int64 returns int64 type value.
+func (k *Key) Int64() (int64, error) {
+ return strconv.ParseInt(k.String(), 10, 64)
+}
+
+// Uint returns uint type value.
+func (k *Key) Uint() (uint, error) {
+ u, e := strconv.ParseUint(k.String(), 10, 64)
+ return uint(u), e
+}
+
+// Uint64 returns uint64 type value.
+func (k *Key) Uint64() (uint64, error) {
+ return strconv.ParseUint(k.String(), 10, 64)
+}
+
+// Duration returns time.Duration type value.
+func (k *Key) Duration() (time.Duration, error) {
+ return time.ParseDuration(k.String())
+}
+
+// TimeFormat parses with given format and returns time.Time type value.
+func (k *Key) TimeFormat(format string) (time.Time, error) {
+ return time.Parse(format, k.String())
+}
+
+// Time parses with RFC3339 format and returns time.Time type value.
+func (k *Key) Time() (time.Time, error) {
+ return k.TimeFormat(time.RFC3339)
+}
+
+// MustString returns default value if key value is empty.
+func (k *Key) MustString(defaultVal string) string {
+ val := k.String()
+ if len(val) == 0 {
+ return defaultVal
+ }
+ return val
+}
+
+// MustBool always returns value without error,
+// it returns false if error occurs.
+func (k *Key) MustBool(defaultVal ...bool) bool {
+ val, err := k.Bool()
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustFloat64 always returns value without error,
+// it returns 0.0 if error occurs.
+func (k *Key) MustFloat64(defaultVal ...float64) float64 {
+ val, err := k.Float64()
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt(defaultVal ...int) int {
+ val, err := k.Int()
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustInt64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustInt64(defaultVal ...int64) int64 {
+ val, err := k.Int64()
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint(defaultVal ...uint) uint {
+ val, err := k.Uint()
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustUint64 always returns value without error,
+// it returns 0 if error occurs.
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 {
+ val, err := k.Uint64()
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustDuration always returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration {
+ val, err := k.Duration()
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTimeFormat always parses with given format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time {
+ val, err := k.TimeFormat(format)
+ if len(defaultVal) > 0 && err != nil {
+ return defaultVal[0]
+ }
+ return val
+}
+
+// MustTime always parses with RFC3339 format and returns value without error,
+// it returns zero value if error occurs.
+func (k *Key) MustTime(defaultVal ...time.Time) time.Time {
+ return k.MustTimeFormat(time.RFC3339, defaultVal...)
+}
+
+// In always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) In(defaultVal string, candidates []string) string {
+ val := k.String()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InFloat64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 {
+ val := k.MustFloat64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt(defaultVal int, candidates []int) int {
+ val := k.MustInt()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InInt64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 {
+ val := k.MustInt64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint(defaultVal uint, candidates []uint) uint {
+ val := k.MustUint()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InUint64 always returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 {
+ val := k.MustUint64()
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTimeFormat always parses with given format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ for _, cand := range candidates {
+ if val == cand {
+ return val
+ }
+ }
+ return defaultVal
+}
+
+// InTime always parses with RFC3339 format and returns value without error,
+// it returns default value if error occurs or doesn't fit into candidates.
+func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time {
+ return k.InTimeFormat(time.RFC3339, defaultVal, candidates)
+}
+
+// RangeFloat64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 {
+ val := k.MustFloat64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt(defaultVal, min, max int) int {
+ val := k.MustInt()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeInt64 checks if value is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeInt64(defaultVal, min, max int64) int64 {
+ val := k.MustInt64()
+ if val < min || val > max {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTimeFormat checks if value with given format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time {
+ val := k.MustTimeFormat(format)
+ if val.Unix() < min.Unix() || val.Unix() > max.Unix() {
+ return defaultVal
+ }
+ return val
+}
+
+// RangeTime checks if value with RFC3339 format is in given range inclusively,
+// and returns default value if it's not.
+func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
+ return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
+}
+
+// Strings returns a list of strings separated by the given delimiter.
+func (k *Key) Strings(delim string) []string {
+ str := k.String()
+ if len(str) == 0 {
+ return []string{}
+ }
+
+ vals := strings.Split(str, delim)
+ for i := range vals {
+ vals[i] = strings.TrimSpace(vals[i])
+ }
+ return vals
+}
+
+// Float64s returns a list of float64 values separated by the given delimiter.
+func (k *Key) Float64s(delim string) []float64 {
+ strs := k.Strings(delim)
+ vals := make([]float64, len(strs))
+ for i := range strs {
+ vals[i], _ = strconv.ParseFloat(strs[i], 64)
+ }
+ return vals
+}
+
+// Ints returns a list of int values separated by the given delimiter.
+func (k *Key) Ints(delim string) []int {
+ strs := k.Strings(delim)
+ vals := make([]int, len(strs))
+ for i := range strs {
+ vals[i], _ = strconv.Atoi(strs[i])
+ }
+ return vals
+}
+
+// Int64s returns a list of int64 values separated by the given delimiter.
+func (k *Key) Int64s(delim string) []int64 {
+ strs := k.Strings(delim)
+ vals := make([]int64, len(strs))
+ for i := range strs {
+ vals[i], _ = strconv.ParseInt(strs[i], 10, 64)
+ }
+ return vals
+}
+
+// Uints returns a list of uint values separated by the given delimiter.
+func (k *Key) Uints(delim string) []uint {
+ strs := k.Strings(delim)
+ vals := make([]uint, len(strs))
+ for i := range strs {
+ u, _ := strconv.ParseUint(strs[i], 10, 64)
+ vals[i] = uint(u)
+ }
+ return vals
+}
+
+// Uint64s returns a list of uint64 values separated by the given delimiter.
+func (k *Key) Uint64s(delim string) []uint64 {
+ strs := k.Strings(delim)
+ vals := make([]uint64, len(strs))
+ for i := range strs {
+ vals[i], _ = strconv.ParseUint(strs[i], 10, 64)
+ }
+ return vals
+}
+
+// TimesFormat parses with the given format and returns a list of time.Time values separated by the given delimiter.
+func (k *Key) TimesFormat(format, delim string) []time.Time {
+ strs := k.Strings(delim)
+ vals := make([]time.Time, len(strs))
+ for i := range strs {
+ vals[i], _ = time.Parse(format, strs[i])
+ }
+ return vals
+}
+
+// Times parses with the RFC3339 format and returns a list of time.Time values separated by the given delimiter.
+func (k *Key) Times(delim string) []time.Time {
+ return k.TimesFormat(time.RFC3339, delim)
+}
+
+// SetValue changes key value.
+func (k *Key) SetValue(v string) {
+ k.value = v
+}
+
+// _________ __ .__
+// / _____/ ____ _____/ |_|__| ____ ____
+// \_____ \_/ __ \_/ ___\ __\ |/ _ \ / \
+// / \ ___/\ \___| | | ( <_> ) | \
+// /_______ /\___ >\___ >__| |__|\____/|___| /
+// \/ \/ \/ \/
+
+// Section represents a config section.
+type Section struct {
+ f *File
+ Comment string
+ name string
+ keys map[string]*Key
+ keyList []string
+ keysHash map[string]string
+}
+
+func newSection(f *File, name string) *Section {
+ return &Section{f, "", name, make(map[string]*Key), make([]string, 0, 10), make(map[string]string)}
+}
+
+// Name returns name of Section.
+func (s *Section) Name() string {
+ return s.name
+}
+
+// NewKey creates a new key to given section.
+func (s *Section) NewKey(name, val string) (*Key, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new key: empty key name")
+ }
+
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ if inSlice(name, s.keyList) {
+ s.keys[name].value = val
+ return s.keys[name], nil
+ }
+
+ s.keyList = append(s.keyList, name)
+ s.keys[name] = &Key{s, "", name, val, false}
+ s.keysHash[name] = val
+ return s.keys[name], nil
+}
+
+// GetKey returns key in section by given name.
+func (s *Section) GetKey(name string) (*Key, error) {
+ // FIXME: change to section level lock?
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ }
+ key := s.keys[name]
+ if s.f.BlockMode {
+ s.f.lock.RUnlock()
+ }
+
+ if key == nil {
+ // Check if it is a child-section.
+ sname := s.name
+ for {
+ if i := strings.LastIndex(sname, "."); i > -1 {
+ sname = sname[:i]
+ sec, err := s.f.GetSection(sname)
+ if err != nil {
+ continue
+ }
+ return sec.GetKey(name)
+ } else {
+ break
+ }
+ }
+		return nil, fmt.Errorf("error when getting key of section '%s': key '%s' does not exist", s.name, name)
+ }
+ return key, nil
+}
+
+// Key assumes the named key exists in the section and creates an empty key when it does not.
+func (s *Section) Key(name string) *Key {
+ key, err := s.GetKey(name)
+ if err != nil {
+ // It's OK here because the only possible error is empty key name,
+ // but if it's empty, this piece of code won't be executed.
+ key, _ = s.NewKey(name, "")
+ return key
+ }
+ return key
+}
+
+// Keys returns list of keys of section.
+func (s *Section) Keys() []*Key {
+ keys := make([]*Key, len(s.keyList))
+ for i := range s.keyList {
+ keys[i] = s.Key(s.keyList[i])
+ }
+ return keys
+}
+
+// KeyStrings returns list of key names of section.
+func (s *Section) KeyStrings() []string {
+ list := make([]string, len(s.keyList))
+ copy(list, s.keyList)
+ return list
+}
+
+// KeysHash returns keys hash consisting of names and values.
+func (s *Section) KeysHash() map[string]string {
+ if s.f.BlockMode {
+ s.f.lock.RLock()
+ defer s.f.lock.RUnlock()
+ }
+
+ hash := map[string]string{}
+ for key, value := range s.keysHash {
+ hash[key] = value
+ }
+ return hash
+}
+
+// DeleteKey deletes a key from section.
+func (s *Section) DeleteKey(name string) {
+ if s.f.BlockMode {
+ s.f.lock.Lock()
+ defer s.f.lock.Unlock()
+ }
+
+ for i, k := range s.keyList {
+ if k == name {
+ s.keyList = append(s.keyList[:i], s.keyList[i+1:]...)
+ delete(s.keys, name)
+ return
+ }
+ }
+}
+
+// ___________.__.__
+// \_ _____/|__| | ____
+// | __) | | | _/ __ \
+// | \ | | |_\ ___/
+// \___ / |__|____/\___ >
+// \/ \/
+
+// File represents a combination of one or more INI files in memory.
+type File struct {
+	// BlockMode indicates whether to protect data with a lock;
+	// it can be disabled when data is only ever read.
+ BlockMode bool
+ // Make sure data is safe in multiple goroutines.
+ lock sync.RWMutex
+
+ // Allow combination of multiple data sources.
+ dataSources []dataSource
+ // Actual data is stored here.
+ sections map[string]*Section
+
+ // To keep data in order.
+ sectionList []string
+
+ NameMapper
+}
+
+// newFile initializes File object with given data sources.
+func newFile(dataSources []dataSource) *File {
+ return &File{
+ BlockMode: true,
+ dataSources: dataSources,
+ sections: make(map[string]*Section),
+ sectionList: make([]string, 0, 10),
+ }
+}
+
+func parseDataSource(source interface{}) (dataSource, error) {
+ switch s := source.(type) {
+ case string:
+ return sourceFile{s}, nil
+ case []byte:
+ return &sourceData{s}, nil
+ default:
+ return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s)
+ }
+}
+
+// Load loads and parses from INI data sources.
+// Arguments can be a mix of file names (string) and raw data ([]byte).
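+//
+// For example (illustrative; the file name is made up):
+//	cfg, err := Load("app.ini", []byte("more = data"))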
+func Load(source interface{}, others ...interface{}) (_ *File, err error) {
+ sources := make([]dataSource, len(others)+1)
+ sources[0], err = parseDataSource(source)
+ if err != nil {
+ return nil, err
+ }
+ for i := range others {
+ sources[i+1], err = parseDataSource(others[i])
+ if err != nil {
+ return nil, err
+ }
+ }
+ f := newFile(sources)
+ return f, f.Reload()
+}
+
+// Empty returns an empty file object.
+func Empty() *File {
+	// Ignore the error here; we are sure our data is good.
+ f, _ := Load([]byte(""))
+ return f
+}
+
+// NewSection creates a new section.
+func (f *File) NewSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ return nil, errors.New("error creating new section: empty section name")
+ }
+
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if inSlice(name, f.sectionList) {
+ return f.sections[name], nil
+ }
+
+ f.sectionList = append(f.sectionList, name)
+ f.sections[name] = newSection(f, name)
+ return f.sections[name], nil
+}
+
+// NewSections creates a list of sections.
+func (f *File) NewSections(names ...string) (err error) {
+ for _, name := range names {
+ if _, err = f.NewSection(name); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// GetSection returns section by given name.
+func (f *File) GetSection(name string) (*Section, error) {
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+
+ if f.BlockMode {
+ f.lock.RLock()
+ defer f.lock.RUnlock()
+ }
+
+ sec := f.sections[name]
+ if sec == nil {
+		return nil, fmt.Errorf("error when getting section: section '%s' does not exist", name)
+ }
+ return sec, nil
+}
+
+// Section assumes the named section exists and creates a new section when it does not.
+func (f *File) Section(name string) *Section {
+ sec, err := f.GetSection(name)
+ if err != nil {
+ // Note: It's OK here because the only possible error is empty section name,
+ // but if it's empty, this piece of code won't be executed.
+ sec, _ = f.NewSection(name)
+ return sec
+ }
+ return sec
+}
+
+// Sections returns a list of all sections.
+func (f *File) Sections() []*Section {
+ sections := make([]*Section, len(f.sectionList))
+ for i := range f.sectionList {
+ sections[i] = f.Section(f.sectionList[i])
+ }
+ return sections
+}
+
+// SectionStrings returns list of section names.
+func (f *File) SectionStrings() []string {
+ list := make([]string, len(f.sectionList))
+ copy(list, f.sectionList)
+ return list
+}
+
+// DeleteSection deletes a section.
+func (f *File) DeleteSection(name string) {
+ if f.BlockMode {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+ }
+
+ if len(name) == 0 {
+ name = DEFAULT_SECTION
+ }
+
+ for i, s := range f.sectionList {
+ if s == name {
+ f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...)
+ delete(f.sections, name)
+ return
+ }
+ }
+}
+
+func cutComment(str string) string {
+ i := strings.Index(str, "#")
+ if i == -1 {
+ return str
+ }
+ return str[:i]
+}
+
+func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) {
+ isEnd := false
+ for {
+ next, err := buf.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return "", err
+ }
+ isEnd = true
+ }
+ pos := strings.LastIndex(next, valQuote)
+ if pos > -1 {
+ val += next[:pos]
+ break
+ }
+ val += next
+ if isEnd {
+ return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next)
+ }
+ }
+ return val, nil
+}
+
+func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) {
+ isEnd := false
+ for {
+ valLen := len(val)
+ if valLen == 0 || val[valLen-1] != '\\' {
+ break
+ }
+ val = val[:valLen-1]
+
+ next, err := buf.ReadString('\n')
+ if err != nil {
+ if err != io.EOF {
+ return "", isEnd, err
+ }
+ isEnd = true
+ }
+
+ next = strings.TrimSpace(next)
+ if len(next) == 0 {
+ break
+ }
+ val += next
+ }
+ return val, isEnd, nil
+}
+
+// parse parses data through an io.Reader.
+func (f *File) parse(reader io.Reader) error {
+ buf := bufio.NewReader(reader)
+
+	// Handle UTF-8 BOM.
+ // http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
+ mask, err := buf.Peek(3)
+ if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
+ buf.Read(mask)
+ }
+
+ count := 1
+ comments := ""
+ isEnd := false
+
+ section, err := f.NewSection(DEFAULT_SECTION)
+ if err != nil {
+ return err
+ }
+
+ for {
+ line, err := buf.ReadString('\n')
+ line = strings.TrimSpace(line)
+ length := len(line)
+
+ // Check error and ignore io.EOF just for a moment.
+ if err != nil {
+ if err != io.EOF {
+ return fmt.Errorf("error reading next line: %v", err)
+ }
+ // The last line of file could be an empty line.
+ if length == 0 {
+ break
+ }
+ isEnd = true
+ }
+
+ // Skip empty lines.
+ if length == 0 {
+ continue
+ }
+
+ switch {
+ case line[0] == '#' || line[0] == ';': // Comments.
+ if len(comments) == 0 {
+ comments = line
+ } else {
+ comments += LineBreak + line
+ }
+ continue
+		case line[0] == '[' && line[length-1] == ']': // New section.
+ section, err = f.NewSection(strings.TrimSpace(line[1 : length-1]))
+ if err != nil {
+ return err
+ }
+
+ if len(comments) > 0 {
+ section.Comment = comments
+ comments = ""
+ }
+ // Reset counter.
+ count = 1
+ continue
+ }
+
+ // Other possibilities.
+ var (
+ i int
+ keyQuote string
+ kname string
+ valQuote string
+ val string
+ )
+
+ // Key name surrounded by quotes.
+ if line[0] == '"' {
+ if length > 6 && line[0:3] == `"""` {
+ keyQuote = `"""`
+ } else {
+ keyQuote = `"`
+ }
+ } else if line[0] == '`' {
+ keyQuote = "`"
+ }
+ if len(keyQuote) > 0 {
+ qLen := len(keyQuote)
+ pos := strings.Index(line[qLen:], keyQuote)
+ if pos == -1 {
+ return fmt.Errorf("error parsing line: missing closing key quote: %s", line)
+ }
+ pos = pos + qLen
+ i = strings.IndexAny(line[pos:], "=:")
+ if i < 0 {
+ return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line)
+ } else if i == pos {
+ return fmt.Errorf("error parsing line: key is empty: %s", line)
+ }
+ i = i + pos
+ kname = line[qLen:pos] // Just keep spaces inside quotes.
+ } else {
+ i = strings.IndexAny(line, "=:")
+ if i < 0 {
+ return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line)
+ } else if i == 0 {
+ return fmt.Errorf("error parsing line: key is empty: %s", line)
+ }
+ kname = strings.TrimSpace(line[0:i])
+ }
+
+ isAutoIncr := false
+ // Auto increment.
+ if kname == "-" {
+ isAutoIncr = true
+ kname = "#" + fmt.Sprint(count)
+ count++
+ }
+
+ lineRight := strings.TrimSpace(line[i+1:])
+ lineRightLength := len(lineRight)
+ firstChar := ""
+ if lineRightLength >= 2 {
+ firstChar = lineRight[0:1]
+ }
+ if firstChar == "`" {
+ valQuote = "`"
+ } else if firstChar == `"` {
+ if lineRightLength >= 3 && lineRight[0:3] == `"""` {
+ valQuote = `"""`
+ } else {
+ valQuote = `"`
+ }
+ } else if firstChar == `'` {
+ valQuote = `'`
+ }
+
+ if len(valQuote) > 0 {
+ qLen := len(valQuote)
+ pos := strings.LastIndex(lineRight[qLen:], valQuote)
+ // For multiple-line value check.
+ if pos == -1 {
+ if valQuote == `"` || valQuote == `'` {
+ return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line)
+ }
+
+ val = lineRight[qLen:] + "\n"
+ val, err = checkMultipleLines(buf, line, val, valQuote)
+ if err != nil {
+ return err
+ }
+ } else {
+ val = lineRight[qLen : pos+qLen]
+ }
+ } else {
+ val = strings.TrimSpace(cutComment(lineRight))
+ val, isEnd, err = checkContinuationLines(buf, val)
+ if err != nil {
+ return err
+ }
+ }
+
+ k, err := section.NewKey(kname, val)
+ if err != nil {
+ return err
+ }
+ k.isAutoIncr = isAutoIncr
+ if len(comments) > 0 {
+ k.Comment = comments
+ comments = ""
+ }
+
+ if isEnd {
+ break
+ }
+ }
+ return nil
+}
+
+func (f *File) reload(s dataSource) error {
+ r, err := s.ReadCloser()
+ if err != nil {
+ return err
+ }
+ defer r.Close()
+
+ return f.parse(r)
+}
+
+// Reload reloads and parses all data sources.
+func (f *File) Reload() (err error) {
+ for _, s := range f.dataSources {
+ if err = f.reload(s); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Append appends one or more data sources and reloads automatically.
+func (f *File) Append(source interface{}, others ...interface{}) error {
+ ds, err := parseDataSource(source)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ for _, s := range others {
+ ds, err = parseDataSource(s)
+ if err != nil {
+ return err
+ }
+ f.dataSources = append(f.dataSources, ds)
+ }
+ return f.Reload()
+}
+
+// WriteToIndent writes file content into io.Writer with given value indentation.
+func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
+ equalSign := "="
+ if PrettyFormat {
+ equalSign = " = "
+ }
+
+	// Use a buffer to make sure the target stays untouched until encoding finishes.
+ buf := bytes.NewBuffer(nil)
+ for i, sname := range f.sectionList {
+ sec := f.Section(sname)
+ if len(sec.Comment) > 0 {
+ if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
+ sec.Comment = "; " + sec.Comment
+ }
+ if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ if i > 0 {
+ if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+ return 0, err
+ }
+ } else {
+ // Write nothing if default section is empty.
+ if len(sec.keyList) == 0 {
+ continue
+ }
+ }
+
+ for _, kname := range sec.keyList {
+ key := sec.Key(kname)
+ if len(key.Comment) > 0 {
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+ if key.Comment[0] != '#' && key.Comment[0] != ';' {
+ key.Comment = "; " + key.Comment
+ }
+ if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ if len(indent) > 0 && sname != DEFAULT_SECTION {
+ buf.WriteString(indent)
+ }
+
+ switch {
+ case key.isAutoIncr:
+ kname = "-"
+ case strings.Contains(kname, "`") || strings.Contains(kname, `"`):
+ kname = `"""` + kname + `"""`
+ case strings.Contains(kname, `=`) || strings.Contains(kname, `:`):
+ kname = "`" + kname + "`"
+ }
+
+ val := key.value
+			// In case key value contains "\n", "`", "\"" or "#".
+ if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) ||
+ strings.Contains(val, "#") {
+ val = `"""` + val + `"""`
+ }
+ if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ // Put a line between sections.
+ if _, err = buf.WriteString(LineBreak); err != nil {
+ return 0, err
+ }
+ }
+
+ return buf.WriteTo(w)
+}
+
+// WriteTo writes file content into io.Writer.
+func (f *File) WriteTo(w io.Writer) (int64, error) {
+ return f.WriteToIndent(w, "")
+}
+
+// SaveToIndent writes content to file system with given value indentation.
+func (f *File) SaveToIndent(filename, indent string) error {
+	// Note: Because os.Create truncates the target file, it is safer
+	// to save to a temporary location first and rename it when done.
+ tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
+ defer os.Remove(tmpPath)
+
+ fw, err := os.Create(tmpPath)
+ if err != nil {
+ return err
+ }
+
+ if _, err = f.WriteToIndent(fw, indent); err != nil {
+ fw.Close()
+ return err
+ }
+ fw.Close()
+
+ // Remove old file and rename the new one.
+ os.Remove(filename)
+ return os.Rename(tmpPath, filename)
+}
+
+// SaveTo writes content to file system.
+func (f *File) SaveTo(filename string) error {
+ return f.SaveToIndent(filename, "")
+}
diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go
new file mode 100644
index 0000000..c118437
--- /dev/null
+++ b/vendor/github.com/go-ini/ini/struct.go
@@ -0,0 +1,350 @@
+// Copyright 2014 Unknwon
+//
+// Licensed under the Apache License, Version 2.0 (the "License"): you may
+// not use this file except in compliance with the License. You may obtain
+// a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations
+// under the License.
+
+package ini
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "reflect"
+ "time"
+ "unicode"
+)
+
+// NameMapper represents an ini tag name mapper.
+type NameMapper func(string) string
+
+// Built-in name getters.
+var (
+ // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
+ AllCapsUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ }
+ newstr = append(newstr, unicode.ToUpper(chr))
+ }
+ return string(newstr)
+ }
+ // TitleUnderscore converts to format title_underscore.
+ TitleUnderscore NameMapper = func(raw string) string {
+ newstr := make([]rune, 0, len(raw))
+ for i, chr := range raw {
+ if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
+ if i > 0 {
+ newstr = append(newstr, '_')
+ }
+ chr -= ('A' - 'a')
+ }
+ newstr = append(newstr, chr)
+ }
+ return string(newstr)
+ }
+)
+
+func (s *Section) parseFieldName(raw, actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ if s.f.NameMapper != nil {
+ return s.f.NameMapper(raw)
+ }
+ return raw
+}
+
+func parseDelim(actual string) string {
+ if len(actual) > 0 {
+ return actual
+ }
+ return ","
+}
+
+var reflectTime = reflect.TypeOf(time.Now()).Kind()
+
+// setWithProperType sets a proper value to the field based on its type,
+// but it does not return an error on failed parsing, because we want to
+// keep the default value that is already assigned to the struct.
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+ switch t.Kind() {
+ case reflect.String:
+ if len(key.String()) == 0 {
+ return nil
+ }
+ field.SetString(key.String())
+ case reflect.Bool:
+ boolVal, err := key.Bool()
+ if err != nil {
+ return nil
+ }
+ field.SetBool(boolVal)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ durationVal, err := key.Duration()
+ if err == nil {
+ field.Set(reflect.ValueOf(durationVal))
+ return nil
+ }
+
+ intVal, err := key.Int64()
+ if err != nil {
+ return nil
+ }
+ field.SetInt(intVal)
+ // byte is an alias for uint8, so supporting uint8 breaks support for byte
+ case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ durationVal, err := key.Duration()
+ if err == nil {
+ field.Set(reflect.ValueOf(durationVal))
+ return nil
+ }
+
+ uintVal, err := key.Uint64()
+ if err != nil {
+ return nil
+ }
+ field.SetUint(uintVal)
+
+ case reflect.Float64:
+ floatVal, err := key.Float64()
+ if err != nil {
+ return nil
+ }
+ field.SetFloat(floatVal)
+ case reflectTime:
+ timeVal, err := key.Time()
+ if err != nil {
+ return nil
+ }
+ field.Set(reflect.ValueOf(timeVal))
+ case reflect.Slice:
+ vals := key.Strings(delim)
+ numVals := len(vals)
+ if numVals == 0 {
+ return nil
+ }
+
+ sliceOf := field.Type().Elem().Kind()
+
+ var times []time.Time
+ if sliceOf == reflectTime {
+ times = key.Times(delim)
+ }
+
+ slice := reflect.MakeSlice(field.Type(), numVals, numVals)
+ for i := 0; i < numVals; i++ {
+ switch sliceOf {
+ case reflectTime:
+ slice.Index(i).Set(reflect.ValueOf(times[i]))
+ default:
+ slice.Index(i).Set(reflect.ValueOf(vals[i]))
+ }
+ }
+ field.Set(slice)
+ default:
+ return fmt.Errorf("unsupported type '%s'", t)
+ }
+ return nil
+}
+
+func (s *Section) mapTo(val reflect.Value) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ fieldName := s.parseFieldName(tpField.Name, tag)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
+ isStruct := tpField.Type.Kind() == reflect.Struct
+ if isAnonymous {
+ field.Set(reflect.New(tpField.Type.Elem()))
+ }
+
+ if isAnonymous || isStruct {
+ if sec, err := s.f.GetSection(fieldName); err == nil {
+ if err = sec.mapTo(field); err != nil {
+ return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+ }
+ continue
+ }
+ }
+
+ if key, err := s.GetKey(fieldName); err == nil {
+ if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+ return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
+ }
+ }
+ }
+ return nil
+}
+
+// MapTo maps section to given struct.
+func (s *Section) MapTo(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot map to non-pointer struct")
+ }
+
+ return s.mapTo(val)
+}
+
+// MapTo maps file to given struct.
+func (f *File) MapTo(v interface{}) error {
+ return f.Section("").MapTo(v)
+}
+
+// MapToWithMapper maps data sources to given struct with name mapper.
+func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.MapTo(v)
+}
+
+// MapTo maps data sources to given struct.
+func MapTo(v, source interface{}, others ...interface{}) error {
+ return MapToWithMapper(v, nil, source, others...)
+}
+
+// reflectWithProperType does the opposite of setWithProperType.
+func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+ switch t.Kind() {
+ case reflect.String:
+ key.SetValue(field.String())
+ case reflect.Bool,
+ reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+ reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
+ reflect.Float64,
+ reflectTime:
+ key.SetValue(fmt.Sprint(field))
+ case reflect.Slice:
+ vals := field.Slice(0, field.Len())
+ if field.Len() == 0 {
+ return nil
+ }
+
+ var buf bytes.Buffer
+ isTime := fmt.Sprint(field.Type()) == "[]time.Time"
+ for i := 0; i < field.Len(); i++ {
+ if isTime {
+ buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339))
+ } else {
+ buf.WriteString(fmt.Sprint(vals.Index(i)))
+ }
+ buf.WriteString(delim)
+ }
+ key.SetValue(buf.String()[:buf.Len()-1])
+ default:
+ return fmt.Errorf("unsupported type '%s'", t)
+ }
+ return nil
+}
+
+func (s *Section) reflectFrom(val reflect.Value) error {
+ if val.Kind() == reflect.Ptr {
+ val = val.Elem()
+ }
+ typ := val.Type()
+
+ for i := 0; i < typ.NumField(); i++ {
+ field := val.Field(i)
+ tpField := typ.Field(i)
+
+ tag := tpField.Tag.Get("ini")
+ if tag == "-" {
+ continue
+ }
+
+ fieldName := s.parseFieldName(tpField.Name, tag)
+ if len(fieldName) == 0 || !field.CanSet() {
+ continue
+ }
+
+ if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
+ (tpField.Type.Kind() == reflect.Struct) {
+ // Note: The only error here is section doesn't exist.
+ sec, err := s.f.GetSection(fieldName)
+ if err != nil {
+ // Note: fieldName can never be empty here, ignore error.
+ sec, _ = s.f.NewSection(fieldName)
+ }
+ if err = sec.reflectFrom(field); err != nil {
+ return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
+ }
+ continue
+ }
+
+		// Note: Same reason as section.
+ key, err := s.GetKey(fieldName)
+ if err != nil {
+ key, _ = s.NewKey(fieldName, "")
+ }
+ if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+ return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
+ }
+
+ }
+ return nil
+}
+
+// ReflectFrom reflects section from given struct.
+func (s *Section) ReflectFrom(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot reflect from non-pointer struct")
+ }
+
+ return s.reflectFrom(val)
+}
+
+// ReflectFrom reflects file from given struct.
+func (f *File) ReflectFrom(v interface{}) error {
+ return f.Section("").ReflectFrom(v)
+}
+
+// ReflectFromWithMapper reflects data sources from given struct with name mapper.
+func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
+ cfg.NameMapper = mapper
+ return cfg.ReflectFrom(v)
+}
+
+// ReflectFrom reflects data sources from given struct.
+func ReflectFrom(cfg *File, v interface{}) error {
+ return ReflectFromWithMapper(cfg, v, nil)
+}
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..1b1b192
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,31 @@
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors. All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 0000000..e2e0651
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto
+ make
diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 0000000..e98ddec
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,223 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
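+//
+// For example (illustrative; pb.MyMessage is a hypothetical generated type):
+//	dst := &pb.MyMessage{}
+//	Merge(dst, src) // fields set in src are now set in dst too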
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ sprop := GetProperties(in.Type())
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+ }
+
+ if emIn, ok := in.Addr().Interface().(extendableProto); ok {
+ emOut := out.Addr().Interface().(extendableProto)
+ mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ if !viaPtr && isProto3Zero(in) {
+ return
+ }
+ out.Set(in)
+ case reflect.Interface:
+ // Probably a oneof field; copy non-nil values.
+ if in.IsNil() {
+ return
+ }
+ // Allocate destination if it is not set, or set to a different type.
+ // Otherwise we will merge as normal.
+ if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+ out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+ }
+ mergeAny(out.Elem(), in.Elem(), false, nil)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key), false, nil)
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem(), true, nil)
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value, and should not
+ // be merged.
+ if prop != nil && prop.proto3 && in.Len() == 0 {
+ return
+ }
+
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i), false, nil)
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 0000000..f94b9f4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,868 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero for both if there is not enough data or the value overflows 64 bits.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ // x, n already 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
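+
+// Example (for illustration): the varint encoding of 300 is
+// [0xAC, 0x02]. 300 in binary is 100101100; the low seven bits
+// 0101100 go out first with the continuation bit set (0xAC),
+// followed by the remaining bits 10 (0x02).
+//
+//   x, n := DecodeVarint([]byte{0xAC, 0x02})
+//   // x == 300, n == 2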
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ // x, err already 0
+
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
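+
+// For illustration: zigzag encoding interleaves signed values so
+// that small magnitudes get short varints:
+//   0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
+// so decoding a wire value of 3 yields -2, and 4 yields 2.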
+
+// These are not valueDecoders: they produce an array of bytes or a string.
+// They handle the bytes and string protocol buffer types and embedded messages.
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // TODO: check whether more callers could use alloc=false.
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
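+
+// Example (for illustration): a length-delimited field body is a
+// varint byte count followed by the raw bytes, so decoding the
+// buffer {0x03, 'f', 'o', 'o'} yields "foo":
+//
+//   b, err := NewBuffer([]byte{0x03, 'f', 'o', 'o'}).DecodeRawBytes(true)
+//   // b == []byte("foo"), err == nil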
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Append the skipped field to the struct's unrecognized-fields buffer.
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
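+
+// Typical usage, as a sketch (pb.Example stands in for any
+// generated message type; it is not defined in this package):
+//
+//   msg := new(pb.Example)
+//   if err := proto.Unmarshal(data, msg); err != nil {
+//       // handle the malformed input
+//   }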
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+ enc, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+func (p *Buffer) DecodeGroup(pb Message) error {
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+ return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ ext := e.ExtensionMap()[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ e.ExtensionMap()[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ // Maybe it's a oneof?
+ if prop.oneofUnmarshaler != nil {
+ m := structPointer_Interface(base, st).(Message)
+ // First return value indicates whether tag is a oneof field.
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o)
+ if err == ErrInternalBadWireType {
+ // Map the error to something more descriptive.
+ // Do the formatting here to save generated code space.
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m)
+ }
+ if ok {
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field.
+ // By spending extra CPU checking reqFields we could name the field,
+ // but only when the missing required field has a tag <= 64.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders.
+// For each, u is the decoded value and v is a pointer to
+// the field (pointer) in the struct.
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_String(base, p.field) = &s
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+
+ y := *v
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // The tagcodes for the key and value properties are always a
+ // single byte because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+ keyelem, valelem := keyptr.Elem(), valptr.Elem()
+ if !keyelem.IsValid() {
+ keyelem = reflect.Zero(p.mtype.Key())
+ }
+ if !valelem.IsValid() {
+ valelem = reflect.Zero(p.mtype.Elem())
+ }
+
+ v.SetMapIndex(keyelem, valelem)
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 0000000..eb7e047
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1331 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // errRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // errOneofHasNil is the error returned if Marshal is called with
+ // a struct with a oneof field containing a nil element.
+ errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ buf[n] = uint8(x)
+ n++
+ return buf[0:n]
+}
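+
+// Example (for illustration): EncodeVarint(300) returns
+// [0xAC, 0x02], the inverse of the DecodeVarint example in
+// decode.go: seven low bits with the continuation bit set,
+// then the remaining high bits.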
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80))
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x))
+ return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+ return sizeVarint(x)
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ var state errorState
+ if err != nil && !state.shouldContinue(err, nil) {
+ return nil, err
+ }
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err
+}
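+
+// Typical usage, as a sketch (msg is any value of a generated
+// message type, not defined here):
+//
+//   data, err := proto.Marshal(msg)
+//   if err != nil {
+//       // handle the failure, e.g. a required field not set
+//   }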
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ var state errorState
+ err = p.enc_len_struct(GetProperties(t.Elem()), base, &state)
+ }
+ return err
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ if err != nil {
+ return err
+ }
+ p.buf = append(p.buf, data...)
+ return nil
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Encode++
+ }
+
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb)
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Size++
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v && !p.oneof {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but be careful: only
+// interface, map, pointer, and slice values can actually be nil.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil()
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return state.err
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state)
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 && !p.oneof {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return errRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return errRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return errRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ v := *structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionMap(v); err != nil {
+ return err
+ }
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := *structPointer_ExtMap(base, p.field)
+ return sizeExtensionMap(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map<key_type, value_type> map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ // Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+
+ // The only illegal map entry values are nil message pointers.
+ if val.Kind() == reflect.Ptr && val.IsNil() {
+ return errors.New("proto: map has nil element")
+ }
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
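+
+// Worked example (for illustration): for a field declared as
+//   map<string, int32> m = 1;
+// the single entry "a" -> 1 is encoded as one MapFieldEntry:
+//   0x0A 0x05        field 1, wire type 2, entry is 5 bytes
+//   0x0A 0x01 0x61   key field 1: the string "a"
+//   0x10 0x01        value field 2: the varint 1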
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes for key and val are the responsibility of the sub-sizer.
+ keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+ valsize := p.mvalprop.size(p.mvalprop, valbase)
+ entry := keysize + valsize
+ // Add on tag code and length of map entry itself.
+ n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+ }
+ return n
+}
+
+// mapEncodeScratch returns new addressable reflect.Values for the map's
+// key and value types, plus structPointers suitable for passing to an
+// encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) //
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) //
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name}
+ }
+ } else if err == errRepeatedHasNil {
+ // Give more context to nil values in repeated fields.
+ return errors.New("repeated field " + p.OrigName + " has nil element")
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ }
+ }
+
+ // Do oneof fields.
+ if prop.oneofMarshaler != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ if err := prop.oneofMarshaler(m, o); err == ErrNil {
+ return errOneofHasNil
+ } else if err != nil {
+ return err
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...)
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ // Factor in any oneof fields.
+ if prop.oneofSizer != nil {
+ m := structPointer_Interface(base, prop.stype).(Message)
+ n += prop.oneofSizer(m)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg))
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg]
+ return state.err
+}
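+
+// A worked example of the fixup above: for a 100-byte message, lMsg = 100,
+// sizeVarint(100) = 1, and four bytes were reserved, so x = 1-4 = -3 and the
+// body is shifted three bytes left. x > 0 only occurs for messages of at
+// least 1<<28 bytes (256 MB), since smaller lengths fit in a four-byte varint.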
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field}
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 0000000..f5db1de
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,276 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN. If the message is defined
+ in a proto3 .proto file, fields are not "set"; specifically,
+ zero length proto3 "bytes" fields are equal (nil == {}).
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal (a "bytes" field,
+ although represented by []byte, is not a repeated field).
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ return false
+ }
+ return equalStruct(v1, v2)
+}
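+
+// A minimal usage sketch, assuming a generated message type pb.Test like the
+// one shown in this package's documentation (see lib.go):
+//
+//	a := &pb.Test{Label: proto.String("x")}
+//	b := &pb.Test{Label: proto.String("x")}
+//	fmt.Println(proto.Equal(a, b)) // true
+//	b.Label = proto.String("y")
+//	fmt.Println(proto.Equal(a, b)) // false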
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+ sprop := GetProperties(v1.Type())
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2, sprop.Prop[i]) {
+ return false
+ }
+ }
+
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+ if v1.Type() == protoMessageType {
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Interface:
+ // Probably a oneof field; compare the inner values.
+ n1, n2 := v1.IsNil(), v2.IsNil()
+ if n1 || n2 {
+ return n1 == n2
+ }
+ e1, e2 := v1.Elem(), v2.Elem()
+ if e1.Type() != e2.Type() {
+ return false
+ }
+ return equalAny(e1, e2, nil)
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2, nil) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ return equalAny(v1.Elem(), v2.Elem(), prop)
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+
+ // Edge case: if this is in a proto3 message, a zero length
+ // bytes field is considered the zero value.
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+ return true
+ }
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i), prop) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// em1 and em2 are extension maps.
+func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..054f4f1
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,399 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base extendableProto, id int32, b []byte) {
+ base.ExtensionMap()[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ // Check the extended type.
+ if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
+func encodeExtensionMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+func sizeExtensionMap(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ _, ok := pb.ExtensionMap()[extension.Field]
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
+ // TODO: Check types, field numbers, etc.?
+ delete(pb.ExtensionMap(), extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present and has no default value it returns ErrMissingExtension.
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return nil, err
+ }
+
+ emap := pb.ExtensionMap()
+ e, ok := emap[extension.Field]
+ if !ok {
+ // defaultExtensionValue returns the default value or
+ // ErrMissingExtension if there is no default.
+ return defaultExtensionValue(extension)
+ }
+
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+ t := reflect.TypeOf(extension.ExtensionType)
+ props := extensionProperties(extension)
+
+ sf, _, err := fieldDefault(t, props)
+ if err != nil {
+ return nil, err
+ }
+
+ if sf == nil || sf.value == nil {
+ // There is no default value.
+ return nil, ErrMissingExtension
+ }
+
+ if t.Kind() != reflect.Ptr {
+ // We do not need to return a Ptr, we can directly return sf.value.
+ return sf.value, nil
+ }
+
+ // We need to return an interface{} that is a pointer to sf.value.
+ value := reflect.New(t).Elem()
+ value.Set(reflect.New(value.Type().Elem()))
+ if sf.kind == reflect.Int32 {
+ // We may have an int32 or an enum, but the underlying data is int32.
+ // Since we can't set an int32 into a non-int32 reflect.Value
+ // directly, set it through SetInt.
+ value.Elem().SetInt(int64(sf.value.(int32)))
+ } else {
+ value.Elem().Set(reflect.ValueOf(sf.value))
+ }
+ return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate a "field" to store the pointer/slice itself; the
+ // pointer/slice will be stored here. We pass
+ // the address of this field to props.dec.
+ // This passes a zero field and a *t and lets props.dec
+ // interpret it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ if o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := pb.(extendableProto)
+ if !ok {
+ err = errors.New("proto: not an extendable proto")
+ return
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return err
+ }
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+ // nil extension values need to be caught early, because the
+ // encoder can't distinguish an ErrNil due to a nil extension
+ // from an ErrNil due to a missing field. Extensions are
+ // always optional, so the encoder would just swallow the error
+ // and drop all the extensions from the encoded message.
+ if reflect.ValueOf(value).IsNil() {
+ return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+ }
+
+ pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
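+
+// A minimal usage sketch; pb.MyMessage and pb.E_Ext are hypothetical
+// stand-ins for generated code with a *string extension type:
+//
+//	msg := &pb.MyMessage{} // hypothetical extendable message
+//	if err := proto.SetExtension(msg, pb.E_Ext, proto.String("hello")); err != nil {
+//		log.Fatal(err)
+//	}
+//	if proto.HasExtension(msg, pb.E_Ext) {
+//		v, err := proto.GetExtension(msg, pb.E_Ext)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		fmt.Println(*v.(*string)) // "hello"
+//	}
+//	proto.ClearExtension(msg, pb.E_Ext)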
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 0000000..0de8f8d
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,894 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers. It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed by the enclosing message's name, or by the
+ enum's type name if it is a top-level enum. Enum types have a String
+ method, and an Enum method to assist in message construction.
+ - Nested messages, groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Oneof field sets are given a single field in their message,
+ with distinguished wrapper types for each possible field value.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+ - Non-repeated fields of non-message type are values instead of pointers.
+ - Getters are only generated for message and oneof fields.
+ - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; }
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ oneof union {
+ int32 number = 6;
+ string name = 7;
+ }
+ }
+
+The resulting file, test.pb.go, is:
+
+ package example
+
+ import proto "github.com/golang/protobuf/proto"
+ import math "math"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+ func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data)
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ // Types that are valid to be assigned to Union:
+ // *Test_Number
+ // *Test_Name
+ Union isTest_Union `protobuf_oneof:"union"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (m *Test) Reset() { *m = Test{} }
+ func (m *Test) String() string { return proto.CompactTextString(m) }
+ func (*Test) ProtoMessage() {}
+
+ type isTest_Union interface {
+ isTest_Union()
+ }
+
+ type Test_Number struct {
+ Number int32 `protobuf:"varint,6,opt,name=number"`
+ }
+ type Test_Name struct {
+ Name string `protobuf:"bytes,7,opt,name=name"`
+ }
+
+ func (*Test_Number) isTest_Union() {}
+ func (*Test_Name) isTest_Union() {}
+
+ func (m *Test) GetUnion() isTest_Union {
+ if m != nil {
+ return m.Union
+ }
+ return nil
+ }
+ const Default_Test_Type int32 = 77
+
+ func (m *Test) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+ }
+
+ func (m *Test) GetType() int32 {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ }
+ func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} }
+ func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+ func (m *Test_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+ }
+
+ func (m *Test) GetNumber() int32 {
+ if x, ok := m.GetUnion().(*Test_Number); ok {
+ return x.Number
+ }
+ return 0
+ }
+
+ func (m *Test) GetName() string {
+ if x, ok := m.GetUnion().(*Test_Name); ok {
+ return x.Name
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ pb "./example.pb"
+ )
+
+ func main() {
+ test := &pb.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Reps: []int64{1, 2, 3},
+ Optionalgroup: &pb.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ Union: &pb.Test_Name{"fred"},
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := &pb.Test{}
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // Use a type switch to determine which oneof was set.
+ switch u := test.Union.(type) {
+ case *pb.Test_Number: // u.Number contains the number.
+ case *pb.Test_Name: // u.Name contains the string.
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "sort"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+ Reset()
+ String() string
+ ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+type Stats struct {
+ Emalloc uint64 // mallocs in encode
+ Dmalloc uint64 // mallocs in decode
+ Encode uint64 // number of encodes
+ Decode uint64 // number of decodes
+ Chit uint64 // number of cache hits
+ Cmiss uint64 // number of cache misses
+ Size uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+ buf []byte // encode/decode byte stream
+ index int // write point
+
+ // pools of basic types to amortize allocation.
+ bools []bool
+ uint32s []uint32
+ uint64s []uint64
+
+ // extra pools, only used with pointer_reflect.go
+ int32s []int32
+ int64s []int64
+ float32s []float32
+ float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+ return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+ p.buf = p.buf[0:0] // for reading/writing
+ p.index = 0 // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+ p.buf = s
+ p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
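+
+// A minimal sketch of reusing one Buffer for low-level varint work
+// (EncodeVarint and DecodeVarint are the methods used elsewhere in this
+// package's encode and decode paths; the error returns are elided here):
+//
+//	var b proto.Buffer
+//	b.EncodeVarint(300)        // appends 0xac 0x02
+//	u, err := b.DecodeVarint() // u == 300; the read index starts at 0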
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+ return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+ return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+ p := new(int32)
+ *p = int32(v)
+ return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+ return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+ return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+ return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+ return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+ return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+ return &v
+}
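+
+// These helpers exist because proto2 scalar fields are pointers in the
+// generated structs; for example, with the pb.Test message from the package
+// documentation:
+//
+//	t := &pb.Test{
+//		Label: proto.String("hello"),
+//		Type:  proto.Int32(17),
+//	}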
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+ s, ok := m[v]
+ if ok {
+ return s
+ }
+ return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, a byte buffer containing the JSON-encoded value,
+// and the enum's name (used only in error messages), it returns an int32 that
+// can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+ if data[0] == '"' {
+ // New style: enums are strings.
+ var repr string
+ if err := json.Unmarshal(data, &repr); err != nil {
+ return -1, err
+ }
+ val, ok := m[repr]
+ if !ok {
+ return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+ }
+ return val, nil
+ }
+ // Old style: enums are ints.
+ var val int32
+ if err := json.Unmarshal(data, &val); err != nil {
+ return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+ }
+ return val, nil
+}
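+
+// A minimal sketch with the FOO enum from the package documentation, showing
+// that both JSON representations decode to the same value:
+//
+//	v, _ := proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`"X"`), "FOO") // v == 17
+//	v, _ = proto.UnmarshalJSONEnum(pb.FOO_value, []byte(`17`), "FOO")   // v == 17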
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+ var u uint64
+
+ obuf := p.buf
+ index := p.index
+ p.buf = b
+ p.index = 0
+ depth := 0
+
+ fmt.Printf("\n--- %s ---\n", s)
+
+out:
+ for {
+ for i := 0; i < depth; i++ {
+ fmt.Print(" ")
+ }
+
+ index := p.index
+ if index == len(p.buf) {
+ break
+ }
+
+ op, err := p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: fetching op err %v\n", index, err)
+ break out
+ }
+ tag := op >> 3
+ wire := op & 7
+
+ switch wire {
+ default:
+ fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+ index, tag, wire)
+ break out
+
+ case WireBytes:
+ var r []byte
+
+ r, err = p.DecodeRawBytes(false)
+ if err != nil {
+ break out
+ }
+ fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+ if len(r) <= 6 {
+ for i := 0; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ } else {
+ for i := 0; i < 3; i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ fmt.Printf(" ..")
+ for i := len(r) - 3; i < len(r); i++ {
+ fmt.Printf(" %.2x", r[i])
+ }
+ }
+ fmt.Printf("\n")
+
+ case WireFixed32:
+ u, err = p.DecodeFixed32()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+ case WireFixed64:
+ u, err = p.DecodeFixed64()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+ case WireVarint:
+ u, err = p.DecodeVarint()
+ if err != nil {
+ fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+ break out
+ }
+ fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+ case WireStartGroup:
+ fmt.Printf("%3d: t=%3d start\n", index, tag)
+ depth++
+
+ case WireEndGroup:
+ depth--
+ fmt.Printf("%3d: t=%3d end\n", index, tag)
+ }
+ }
+
+ if depth != 0 {
+ fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+ }
+ fmt.Printf("\n")
+
+ p.buf = obuf
+ p.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+ setDefaults(reflect.ValueOf(pb), true, false)
+}
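+
+// A minimal sketch with the pb.Test message from the package documentation,
+// whose type field is declared with [default=77]:
+//
+//	t := &pb.Test{Label: proto.String("x")}
+//	proto.SetDefaults(t)
+//	fmt.Println(*t.Type) // 77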
+
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+ v = v.Elem()
+
+ defaultMu.RLock()
+ dm, ok := defaults[v.Type()]
+ defaultMu.RUnlock()
+ if !ok {
+ dm = buildDefaultMessage(v.Type())
+ defaultMu.Lock()
+ defaults[v.Type()] = dm
+ defaultMu.Unlock()
+ }
+
+ for _, sf := range dm.scalars {
+ f := v.Field(sf.index)
+ if !f.IsNil() {
+ // field already set
+ continue
+ }
+ dv := sf.value
+ if dv == nil && !zeros {
+ // no explicit default, and don't want to set zeros
+ continue
+ }
+ fptr := f.Addr().Interface() // **T
+ // TODO: Consider batching the allocations we do here.
+ switch sf.kind {
+ case reflect.Bool:
+ b := new(bool)
+ if dv != nil {
+ *b = dv.(bool)
+ }
+ *(fptr.(**bool)) = b
+ case reflect.Float32:
+ f := new(float32)
+ if dv != nil {
+ *f = dv.(float32)
+ }
+ *(fptr.(**float32)) = f
+ case reflect.Float64:
+ f := new(float64)
+ if dv != nil {
+ *f = dv.(float64)
+ }
+ *(fptr.(**float64)) = f
+ case reflect.Int32:
+ // might be an enum
+ if ft := f.Type(); ft != int32PtrType {
+ // enum
+ f.Set(reflect.New(ft.Elem()))
+ if dv != nil {
+ f.Elem().SetInt(int64(dv.(int32)))
+ }
+ } else {
+ // int32 field
+ i := new(int32)
+ if dv != nil {
+ *i = dv.(int32)
+ }
+ *(fptr.(**int32)) = i
+ }
+ case reflect.Int64:
+ i := new(int64)
+ if dv != nil {
+ *i = dv.(int64)
+ }
+ *(fptr.(**int64)) = i
+ case reflect.String:
+ s := new(string)
+ if dv != nil {
+ *s = dv.(string)
+ }
+ *(fptr.(**string)) = s
+ case reflect.Uint8:
+ // exceptional case: []byte
+ var b []byte
+ if dv != nil {
+ db := dv.([]byte)
+ b = make([]byte, len(db))
+ copy(b, db)
+ } else {
+ b = []byte{}
+ }
+ *(fptr.(*[]byte)) = b
+ case reflect.Uint32:
+ u := new(uint32)
+ if dv != nil {
+ *u = dv.(uint32)
+ }
+ *(fptr.(**uint32)) = u
+ case reflect.Uint64:
+ u := new(uint64)
+ if dv != nil {
+ *u = dv.(uint64)
+ }
+ *(fptr.(**uint64)) = u
+ default:
+ log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+ }
+ }
+
+ for _, ni := range dm.nested {
+ f := v.Field(ni)
+ // f is *T or []*T or map[T]*T
+ switch f.Kind() {
+ case reflect.Ptr:
+ if f.IsNil() {
+ continue
+ }
+ setDefaults(f, recur, zeros)
+
+ case reflect.Slice:
+ for i := 0; i < f.Len(); i++ {
+ e := f.Index(i)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+
+ case reflect.Map:
+ for _, k := range f.MapKeys() {
+ e := f.MapIndex(k)
+ if e.IsNil() {
+ continue
+ }
+ setDefaults(e, recur, zeros)
+ }
+ }
+ }
+}
+
+var (
+ // defaults maps a protocol buffer struct type to a defaultMessage that
+ // records which fields need defaulting and their proto-declared values.
+ defaultMu sync.RWMutex
+ defaults = make(map[reflect.Type]defaultMessage)
+
+ int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+ scalars []scalarField
+ nested []int // struct field index of nested messages
+}
+
+type scalarField struct {
+ index int // struct field index
+ kind reflect.Kind // element type (the T in *T or []T)
+ value interface{} // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+ sprop := GetProperties(t)
+ for _, prop := range sprop.Prop {
+ fi, ok := sprop.decoderTags.get(prop.Tag)
+ if !ok {
+ // XXX_unrecognized
+ continue
+ }
+ ft := t.Field(fi).Type
+
+ sf, nested, err := fieldDefault(ft, prop)
+ switch {
+ case err != nil:
+ log.Print(err)
+ case nested:
+ dm.nested = append(dm.nested, fi)
+ case sf != nil:
+ sf.index = fi
+ dm.scalars = append(dm.scalars, *sf)
+ }
+ }
+
+ return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+ var canHaveDefault bool
+ switch ft.Kind() {
+ case reflect.Ptr:
+ if ft.Elem().Kind() == reflect.Struct {
+ nestedMessage = true
+ } else {
+ canHaveDefault = true // proto2 scalar field
+ }
+
+ case reflect.Slice:
+ switch ft.Elem().Kind() {
+ case reflect.Ptr:
+ nestedMessage = true // repeated message
+ case reflect.Uint8:
+ canHaveDefault = true // bytes field
+ }
+
+ case reflect.Map:
+ if ft.Elem().Kind() == reflect.Ptr {
+ nestedMessage = true // map with message values
+ }
+ }
+
+ if !canHaveDefault {
+ if nestedMessage {
+ return nil, true, nil
+ }
+ return nil, false, nil
+ }
+
+ // We now know that ft is a pointer or slice.
+ sf = &scalarField{kind: ft.Elem().Kind()}
+
+ // scalar fields without defaults
+ if !prop.HasDefault {
+ return sf, false, nil
+ }
+
+ // a scalar field: either *T or []byte
+ switch ft.Elem().Kind() {
+ case reflect.Bool:
+ x, err := strconv.ParseBool(prop.Default)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Float32:
+ x, err := strconv.ParseFloat(prop.Default, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+ }
+ sf.value = float32(x)
+ case reflect.Float64:
+ x, err := strconv.ParseFloat(prop.Default, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.Int32:
+ x, err := strconv.ParseInt(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+ }
+ sf.value = int32(x)
+ case reflect.Int64:
+ x, err := strconv.ParseInt(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ case reflect.String:
+ sf.value = prop.Default
+ case reflect.Uint8:
+ // []byte (not *uint8)
+ sf.value = []byte(prop.Default)
+ case reflect.Uint32:
+ x, err := strconv.ParseUint(prop.Default, 10, 32)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+ }
+ sf.value = uint32(x)
+ case reflect.Uint64:
+ x, err := strconv.ParseUint(prop.Default, 10, 64)
+ if err != nil {
+ return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+ }
+ sf.value = x
+ default:
+ return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+ }
+
+ return sf, false, nil
+}
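+
+// For example, the pb.Test field declared as
+//
+//	optional int32 type = 2 [default=77];
+//
+// carries the struct tag `protobuf:"varint,2,opt,name=type,def=77"`, so
+// fieldDefault parses prop.Default == "77" into sf.value = int32(77).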
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+func mapKeys(vs []reflect.Value) sort.Interface {
+ s := mapKeySorter{
+ vs: vs,
+ // default Less function: textual comparison
+ less: func(a, b reflect.Value) bool {
+ return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface())
+ },
+ }
+
+ // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps;
+ // numeric keys are sorted numerically.
+ if len(vs) == 0 {
+ return s
+ }
+ switch vs[0].Kind() {
+ case reflect.Int32, reflect.Int64:
+ s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+ case reflect.Uint32, reflect.Uint64:
+ s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+ }
+
+ return s
+}
+
+type mapKeySorter struct {
+ vs []reflect.Value
+ less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+ return s.less(s.vs[i], s.vs[j])
+}
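+
+// A minimal sketch of deterministic key ordering:
+//
+//	m := map[int32]string{3: "c", 1: "a", 2: "b"}
+//	keys := reflect.ValueOf(m).MapKeys()
+//	sort.Sort(mapKeys(keys)) // numeric order: 1, 2, 3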
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint32, reflect.Uint64:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.String:
+ return v.String() == ""
+ }
+ return false
+}
+
+// ProtoPackageIsVersion1 is referenced from generated protocol buffer files
+// to assert that the code is compatible with this version of the proto package.
+const ProtoPackageIsVersion1 = true
diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 0000000..e25e01e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,280 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+// message MessageSet {
+// repeated group Item = 1 {
+// required int32 type_id = 2;
+// required string message = 3;
+// };
+// }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+ TypeId *int32 `protobuf:"varint,2,req,name=type_id"`
+ Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+ Item []*_MessageSet_Item `protobuf:"group,1,rep"`
+ XXX_unrecognized []byte
+ // TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+ MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return nil
+ }
+ id := mti.MessageTypeId()
+ for _, item := range ms.Item {
+ if *item.TypeId == id {
+ return item
+ }
+ }
+ return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+	return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+ if item := ms.find(pb); item != nil {
+ return Unmarshal(item.Message, pb)
+ }
+ if _, ok := pb.(messageTypeIder); !ok {
+ return errNoMessageTypeID
+ }
+ return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+ msg, err := Marshal(pb)
+ if err != nil {
+ return err
+ }
+ if item := ms.find(pb); item != nil {
+ // reuse existing item
+ item.Message = msg
+ return nil
+ }
+
+ mti, ok := pb.(messageTypeIder)
+ if !ok {
+ return errNoMessageTypeID
+ }
+
+ mtid := mti.MessageTypeId()
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: &mtid,
+ Message: msg,
+ })
+ return nil
+}
+
+func (ms *messageSet) Reset() { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage() {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+ i := 0
+ for ; buf[i]&0x80 != 0; i++ {
+ }
+ return buf[i+1:]
+}
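+
+// For example, skipVarint([]byte{0xac, 0x02, 0x08}) returns []byte{0x08}:
+// 0xac has the continuation bit set, 0x02 does not, so two bytes are skipped.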
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
+ if err := encodeExtensionMap(m); err != nil {
+ return nil, err
+ }
+
+ // Sort extension IDs to provide a deterministic encoding.
+ // See also enc_map in encode.go.
+ ids := make([]int, 0, len(m))
+ for id := range m {
+ ids = append(ids, int(id))
+ }
+ sort.Ints(ids)
+
+ ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+ for _, id := range ids {
+ e := m[int32(id)]
+ // Remove the wire type and field number varint, as well as the length varint.
+ msg := skipVarint(skipVarint(e.enc))
+
+ ms.Item = append(ms.Item, &_MessageSet_Item{
+ TypeId: Int32(int32(id)),
+ Message: msg,
+ })
+ }
+ return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
+ ms := new(messageSet)
+ if err := Unmarshal(buf, ms); err != nil {
+ return err
+ }
+ for _, item := range ms.Item {
+ id := *item.TypeId
+ msg := item.Message
+
+ // Restore wire type and field number varint, plus length varint.
+ // Be careful to preserve duplicate items.
+ b := EncodeVarint(uint64(id)<<3 | WireBytes)
+ if ext, ok := m[id]; ok {
+ // Existing data; rip off the tag and length varint
+ // so we join the new data correctly.
+ // We can assume that ext.enc is set because we are unmarshaling.
+ o := ext.enc[len(b):] // skip wire type and field number
+ _, n := DecodeVarint(o) // calculate length of length varint
+ o = o[n:] // skip length varint
+ msg = append(o, msg...) // join old data and new data
+ }
+ b = append(b, EncodeVarint(uint64(len(msg)))...)
+ b = append(b, msg...)
+
+ m[id] = Extension{enc: b}
+ }
+ return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
+ var b bytes.Buffer
+ b.WriteByte('{')
+
+ // Process the map in key order for deterministic output.
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+ for i, id := range ids {
+ ext := m[id]
+ if i > 0 {
+ b.WriteByte(',')
+ }
+
+ msd, ok := messageSetMap[id]
+ if !ok {
+ // Unknown type; we can't render it, so skip it.
+ continue
+ }
+ fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+ x := ext.value
+ if x == nil {
+ x = reflect.New(msd.t.Elem()).Interface()
+ if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+ return nil, err
+ }
+ }
+ d, err := json.Marshal(x)
+ if err != nil {
+ return nil, err
+ }
+ b.Write(d)
+ }
+ b.WriteByte('}')
+ return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
+ // Common-case fast path.
+ if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+ return nil
+ }
+
+ // This is fairly tricky, and it's not clear that it is needed.
+ return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+type messageSetDesc struct {
+ t reflect.Type // pointer to struct
+ name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+ messageSetMap[fieldNum] = messageSetDesc{
+ t: reflect.TypeOf(m),
+ name: name,
+ }
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..9899141
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,479 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
+// A structPointer is a pointer to a struct.
+type structPointer struct {
+ v reflect.Value
+}
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+// The reflect value must itself be a pointer to a struct.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer{v}
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p.v.IsNil()
+}
+
+// Interface returns the struct pointer as an interface value.
+func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
+ return p.v.Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// field returns the given field in the struct as a reflect value.
+func structPointer_field(p structPointer, f field) reflect.Value {
+ // Special case: an extension map entry with a value of type T
+ // passes a *T to the struct-handling code with a zero field,
+ // expecting that it will be treated as equivalent to *struct{ X T },
+ // which has the same memory layout. We have to handle that case
+ // specially, because reflect will panic if we call FieldByIndex on a
+ // non-struct.
+ if f == nil {
+ return p.v.Elem()
+ }
+
+ return p.v.Elem().FieldByIndex(f)
+}
+
+// ifield returns the given field in the struct as an interface value.
+func structPointer_ifield(p structPointer, f field) interface{} {
+ return structPointer_field(p, f).Addr().Interface()
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return structPointer_ifield(p, f).(*[]string)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+ return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+ v reflect.Value
+}
+
+func (p structPointerSlice) Len() int { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+ p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+ int32Type = reflect.TypeOf(int32(0))
+ uint32Type = reflect.TypeOf(uint32(0))
+ float32Type = reflect.TypeOf(float32(0))
+ int64Type = reflect.TypeOf(int64(0))
+ uint64Type = reflect.TypeOf(uint64(0))
+ float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
+type word32 struct {
+ v reflect.Value
+}
+
+// IsNil reports whether p is nil.
+func word32_IsNil(p word32) bool {
+ return p.v.IsNil()
+}
+
+// Set sets p to point at a newly allocated word with bits set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int32Type:
+ if len(o.int32s) == 0 {
+ o.int32s = make([]int32, uint32PoolSize)
+ }
+ o.int32s[0] = int32(x)
+ p.v.Set(reflect.ValueOf(&o.int32s[0]))
+ o.int32s = o.int32s[1:]
+ return
+ case uint32Type:
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint32s[0]))
+ o.uint32s = o.uint32s[1:]
+ return
+ case float32Type:
+ if len(o.float32s) == 0 {
+ o.float32s = make([]float32, uint32PoolSize)
+ }
+ o.float32s[0] = math.Float32frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float32s[0]))
+ o.float32s = o.float32s[1:]
+ return
+ }
+
+ // must be enum
+ p.v.Set(reflect.New(t))
+ p.v.Elem().SetInt(int64(int32(x)))
+}
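+
+// The branches above share a slab-allocation pattern (a sketch, using
+// the int32 case): instead of one heap allocation per field, Set hands
+// out consecutive elements of a pre-made slice:
+//
+//	if len(o.int32s) == 0 {
+//		o.int32s = make([]int32, uint32PoolSize) // one allocation, many fields
+//	}
+//	p.v.Set(reflect.ValueOf(&o.int32s[0])) // point the field into the slab
+//	o.int32s = o.int32s[1:]                // shrink the slab for the next Set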
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32_Get(p word32) uint32 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32{structPointer_field(p, f)}
+}
+
+// A word32Val represents a field of type int32, uint32, float32, or enum.
+// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
+type word32Val struct {
+ v reflect.Value
+}
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ switch p.v.Type() {
+ case int32Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint32Type:
+ p.v.SetUint(uint64(x))
+ return
+ case float32Type:
+ p.v.SetFloat(float64(math.Float32frombits(x)))
+ return
+ }
+
+ // must be enum
+ p.v.SetInt(int64(int32(x)))
+}
+
+// Get gets the bits pointed at by p, as a uint32.
+func word32Val_Get(p word32Val) uint32 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Val returns a reference to an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val{structPointer_field(p, f)}
+}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
+// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) word32Slice {
+ return word32Slice{structPointer_field(p, f)}
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 struct {
+ v reflect.Value
+}
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ t := p.v.Type().Elem()
+ switch t {
+ case int64Type:
+ if len(o.int64s) == 0 {
+ o.int64s = make([]int64, uint64PoolSize)
+ }
+ o.int64s[0] = int64(x)
+ p.v.Set(reflect.ValueOf(&o.int64s[0]))
+ o.int64s = o.int64s[1:]
+ return
+ case uint64Type:
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ p.v.Set(reflect.ValueOf(&o.uint64s[0]))
+ o.uint64s = o.uint64s[1:]
+ return
+ case float64Type:
+ if len(o.float64s) == 0 {
+ o.float64s = make([]float64, uint64PoolSize)
+ }
+ o.float64s[0] = math.Float64frombits(x)
+ p.v.Set(reflect.ValueOf(&o.float64s[0]))
+ o.float64s = o.float64s[1:]
+ return
+ }
+ panic("unreachable")
+}
+
+func word64_IsNil(p word64) bool {
+ return p.v.IsNil()
+}
+
+func word64_Get(p word64) uint64 {
+ elem := p.v.Elem()
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64{structPointer_field(p, f)}
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val struct {
+ v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ switch p.v.Type() {
+ case int64Type:
+ p.v.SetInt(int64(x))
+ return
+ case uint64Type:
+ p.v.SetUint(x)
+ return
+ case float64Type:
+ p.v.SetFloat(math.Float64frombits(x))
+ return
+ }
+ panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ elem := p.v
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val{structPointer_field(p, f)}
+}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(x))
+ case reflect.Uint64:
+ elem.SetUint(x)
+ case reflect.Float64:
+ elem.SetFloat(math.Float64frombits(x))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return elem.Uint()
+ case reflect.Float64:
+ return math.Float64bits(elem.Float())
+ }
+ panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+ return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..ceece77
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,266 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+ return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+ return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+ return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+ return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+ return f != ^field(0)
+}
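+
+// The accessors below all follow one pattern: add the field's byte
+// offset f to the struct's base address p and cast the result to a
+// typed pointer, e.g.
+//
+//	(*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))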
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+ return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+ return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+ return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+ return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+ return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+ return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+ return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+ return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+ return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+ return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+ *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+ return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+ return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+ return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+ if len(o.uint32s) == 0 {
+ o.uint32s = make([]uint32, uint32PoolSize)
+ }
+ o.uint32s[0] = x
+ *p = &o.uint32s[0]
+ o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+ return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+ return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+ *p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+ return *p
+}
+
+// Word32Val returns the address of an int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+ return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32) { *v = append(*v, x) }
+func (v *word32Slice) Len() int { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+ return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
+type word64 **uint64
+
+func word64_Set(p word64, o *Buffer, x uint64) {
+ if len(o.uint64s) == 0 {
+ o.uint64s = make([]uint64, uint64PoolSize)
+ }
+ o.uint64s[0] = x
+ *p = &o.uint64s[0]
+ o.uint64s = o.uint64s[1:]
+}
+
+func word64_IsNil(p word64) bool {
+ return *p == nil
+}
+
+func word64_Get(p word64) uint64 {
+ return **p
+}
+
+func structPointer_Word64(p structPointer, f field) word64 {
+ return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Val is like word32Val but for 64-bit values.
+type word64Val *uint64
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+ *p = x
+}
+
+func word64Val_Get(p word64Val) uint64 {
+ return *p
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+ return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice []uint64
+
+func (v *word64Slice) Append(x uint64) { *v = append(*v, x) }
+func (v *word64Slice) Len() int { return len(*v) }
+func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }
+
+func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
+ return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..4fe2ec2
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,846 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for deriving the encoding/decoding properties of protocol buffer struct fields.
+ */
+
+import (
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+ WireVarint = 0
+ WireFixed64 = 1
+ WireBytes = 2
+ WireStartGroup = 3
+ WireEndGroup = 4
+ WireFixed32 = 5
+)
+
+const startSize = 10 // initial slice/string sizes
+
+// Encoders are defined in encode.go
+// An encoder outputs the full representation of a field, including its
+// tag and encoder type.
+type encoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueEncoder encodes a single integer in a particular encoding.
+type valueEncoder func(o *Buffer, x uint64) error
+
+// Sizers are defined in encode.go
+// A sizer returns the encoded size of a field, including its tag and encoder
+// type.
+type sizer func(prop *Properties, base structPointer) int
+
+// A valueSizer returns the encoded size of a single integer in a particular
+// encoding.
+type valueSizer func(x uint64) int
+
+// Decoders are defined in decode.go
+// A decoder creates a value from its wire representation.
+// Unrecognized subelements are saved in unrec.
+type decoder func(p *Buffer, prop *Properties, base structPointer) error
+
+// A valueDecoder decodes a single integer in a particular encoding.
+type valueDecoder func(o *Buffer) (x uint64, err error)
+
+// A oneofMarshaler does the marshaling for all oneof fields in a message.
+type oneofMarshaler func(Message, *Buffer) error
+
+// A oneofUnmarshaler does the unmarshaling for a oneof field in a message.
+type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error)
+
+// A oneofSizer does the sizing for all oneof fields in a message.
+type oneofSizer func(Message) int
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+ fastTags []int
+ slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+ if t > 0 && t < tagMapFastLimit {
+ if t >= len(p.fastTags) {
+ return 0, false
+ }
+ fi := p.fastTags[t]
+ return fi, fi >= 0
+ }
+ fi, ok := p.slowTags[t]
+ return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+ if t > 0 && t < tagMapFastLimit {
+ for len(p.fastTags) < t+1 {
+ p.fastTags = append(p.fastTags, -1)
+ }
+ p.fastTags[t] = fi
+ return
+ }
+ if p.slowTags == nil {
+ p.slowTags = make(map[int]int)
+ }
+ p.slowTags[t] = fi
+}
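+
+// Illustrative use of the two paths (tag numbers chosen arbitrarily):
+//
+//	var tm tagMap
+//	tm.put(3, 0)        // 3 < tagMapFastLimit: stored in fastTags[3]
+//	tm.put(100000, 1)   // large tag: stored in the slowTags map
+//	fi, ok := tm.get(3) // fi == 0, ok == true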
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+ Prop []*Properties // properties for each field
+ reqCount int // required count
+ decoderTags tagMap // map from proto tag to struct field number
+ decoderOrigNames map[string]int // map from original name to struct field number
+ order []int // list of struct field numbers in tag order
+ unrecField field // field id of the XXX_unrecognized []byte field
+ extendable bool // is this an extendable proto
+
+ oneofMarshaler oneofMarshaler
+ oneofUnmarshaler oneofUnmarshaler
+ oneofSizer oneofSizer
+ stype reflect.Type
+
+ // OneofTypes contains information about the oneof fields in this message.
+ // It is keyed by the original name of a field.
+ OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+ Type reflect.Type // pointer to generated struct type for this oneof field
+ Field int // struct field number of the containing oneof in the message
+ Prop *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+ return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+ Name string // name of the field, for error messages
+ OrigName string // original name before protocol compiler (always set)
+ JSONName string // name to use for JSON; determined by protoc
+ Wire string
+ WireType int
+ Tag int
+ Required bool
+ Optional bool
+ Repeated bool
+ Packed bool // relevant for repeated primitives only
+ Enum string // set for enum types only
+ proto3 bool // whether this is known to be a proto3 field; set for []byte only
+ oneof bool // whether this is a oneof field
+
+ Default string // default value
+ HasDefault bool // whether an explicit default was provided
+ def_uint64 uint64
+
+ enc encoder
+ valEnc valueEncoder // set for bool and numeric types only
+ field field
+ tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+ tagbuf [8]byte
+ stype reflect.Type // set for struct types only
+ sprop *StructProperties // set for struct types only
+ isMarshaler bool
+ isUnmarshaler bool
+
+ mtype reflect.Type // set for map types only
+ mkeyprop *Properties // set for map types only
+ mvalprop *Properties // set for map types only
+
+ size sizer
+ valSize valueSizer // set for bool and numeric types only
+
+ dec decoder
+ valDec valueDecoder // set for bool and numeric types only
+
+ // If this is a packable field, this will be the decoder for the packed version of the field.
+ packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s = ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ s += ",name=" + p.OrigName
+ if p.JSONName != p.OrigName {
+ s += ",json=" + p.JSONName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if p.oneof {
+ s += ",oneof"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+ // "bytes,49,opt,name=foo,def=hello!"
+ fields := strings.Split(s, ",") // breaks def=, but handled below.
+ if len(fields) < 2 {
+ fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
+ return
+ }
+
+ p.Wire = fields[0]
+ switch p.Wire {
+ case "varint":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeVarint
+ p.valDec = (*Buffer).DecodeVarint
+ p.valSize = sizeVarint
+ case "fixed32":
+ p.WireType = WireFixed32
+ p.valEnc = (*Buffer).EncodeFixed32
+ p.valDec = (*Buffer).DecodeFixed32
+ p.valSize = sizeFixed32
+ case "fixed64":
+ p.WireType = WireFixed64
+ p.valEnc = (*Buffer).EncodeFixed64
+ p.valDec = (*Buffer).DecodeFixed64
+ p.valSize = sizeFixed64
+ case "zigzag32":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag32
+ p.valDec = (*Buffer).DecodeZigzag32
+ p.valSize = sizeZigzag32
+ case "zigzag64":
+ p.WireType = WireVarint
+ p.valEnc = (*Buffer).EncodeZigzag64
+ p.valDec = (*Buffer).DecodeZigzag64
+ p.valSize = sizeZigzag64
+ case "bytes", "group":
+ p.WireType = WireBytes
+ // no numeric converter for non-numeric types
+ default:
+ fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
+ return
+ }
+
+ var err error
+ p.Tag, err = strconv.Atoi(fields[1])
+ if err != nil {
+ return
+ }
+
+ for i := 2; i < len(fields); i++ {
+ f := fields[i]
+ switch {
+ case f == "req":
+ p.Required = true
+ case f == "opt":
+ p.Optional = true
+ case f == "rep":
+ p.Repeated = true
+ case f == "packed":
+ p.Packed = true
+ case strings.HasPrefix(f, "name="):
+ p.OrigName = f[5:]
+ case strings.HasPrefix(f, "json="):
+ p.JSONName = f[5:]
+ case strings.HasPrefix(f, "enum="):
+ p.Enum = f[5:]
+ case f == "proto3":
+ p.proto3 = true
+ case f == "oneof":
+ p.oneof = true
+ case strings.HasPrefix(f, "def="):
+ p.HasDefault = true
+ p.Default = f[4:] // rest of string
+ if i+1 < len(fields) {
+ // Commas aren't escaped, and def is always last.
+ p.Default += "," + strings.Join(fields[i+1:], ",")
+ break
+ }
+ }
+ }
+}
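+
+// Worked example (illustrative):
+//
+//	var p Properties
+//	p.Parse("bytes,49,opt,name=foo,def=hello!")
+//	// p.Wire == "bytes", p.WireType == WireBytes, p.Tag == 49,
+//	// p.Optional == true, p.OrigName == "foo", p.Default == "hello!"
+//
+// A default containing commas (e.g. def=a,b) is re-joined by the loop
+// above, so Default becomes "a,b".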
+
+func logNoSliceEnc(t1, t2 reflect.Type) {
+ fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+ p.enc = nil
+ p.dec = nil
+ p.size = nil
+
+ switch t1 := typ; t1.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+ // proto3 scalar types
+
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_proto3_bool
+ p.dec = (*Buffer).dec_proto3_bool
+ p.size = size_proto3_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_proto3_int32
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_proto3_uint32
+ p.dec = (*Buffer).dec_proto3_int32 // can reuse
+ p.size = size_proto3_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_proto3_int64
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int32
+ p.size = size_proto3_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_proto3_int64
+ p.size = size_proto3_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_proto3_string
+ p.dec = (*Buffer).dec_proto3_string
+ p.size = size_proto3_string
+
+ case reflect.Ptr:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+ break
+ case reflect.Bool:
+ p.enc = (*Buffer).enc_bool
+ p.dec = (*Buffer).dec_bool
+ p.size = size_bool
+ case reflect.Int32:
+ p.enc = (*Buffer).enc_int32
+ p.dec = (*Buffer).dec_int32
+ p.size = size_int32
+ case reflect.Uint32:
+ p.enc = (*Buffer).enc_uint32
+ p.dec = (*Buffer).dec_int32 // can reuse
+ p.size = size_uint32
+ case reflect.Int64, reflect.Uint64:
+ p.enc = (*Buffer).enc_int64
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.Float32:
+ p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+ p.dec = (*Buffer).dec_int32
+ p.size = size_uint32
+ case reflect.Float64:
+ p.enc = (*Buffer).enc_int64 // can just treat them as bits
+ p.dec = (*Buffer).dec_int64
+ p.size = size_int64
+ case reflect.String:
+ p.enc = (*Buffer).enc_string
+ p.dec = (*Buffer).dec_string
+ p.size = size_string
+ case reflect.Struct:
+ p.stype = t1.Elem()
+ p.isMarshaler = isMarshaler(t1)
+ p.isUnmarshaler = isUnmarshaler(t1)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_struct_message
+ p.dec = (*Buffer).dec_struct_message
+ p.size = size_struct_message
+ } else {
+ p.enc = (*Buffer).enc_struct_group
+ p.dec = (*Buffer).dec_struct_group
+ p.size = size_struct_group
+ }
+ }
+
+ case reflect.Slice:
+ switch t2 := t1.Elem(); t2.Kind() {
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ case reflect.Bool:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_bool
+ p.size = size_slice_packed_bool
+ } else {
+ p.enc = (*Buffer).enc_slice_bool
+ p.size = size_slice_bool
+ }
+ p.dec = (*Buffer).dec_slice_bool
+ p.packedDec = (*Buffer).dec_slice_packed_bool
+ case reflect.Int32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int32
+ p.size = size_slice_packed_int32
+ } else {
+ p.enc = (*Buffer).enc_slice_int32
+ p.size = size_slice_int32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Uint32:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case reflect.Int64, reflect.Uint64:
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_byte
+ p.dec = (*Buffer).dec_slice_byte
+ p.size = size_slice_byte
+ // This is a []byte, which is either a bytes field,
+ // or the value of a map field. In the latter case,
+ // we always encode an empty []byte, so we should not
+ // use the proto3 enc/size funcs.
+ // f == nil iff this is the key/value of a map field.
+ if p.proto3 && f != nil {
+ p.enc = (*Buffer).enc_proto3_slice_byte
+ p.size = size_proto3_slice_byte
+ }
+ case reflect.Float32, reflect.Float64:
+ switch t2.Bits() {
+ case 32:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_uint32
+ p.size = size_slice_packed_uint32
+ } else {
+ p.enc = (*Buffer).enc_slice_uint32
+ p.size = size_slice_uint32
+ }
+ p.dec = (*Buffer).dec_slice_int32
+ p.packedDec = (*Buffer).dec_slice_packed_int32
+ case 64:
+ // can just treat them as bits
+ if p.Packed {
+ p.enc = (*Buffer).enc_slice_packed_int64
+ p.size = size_slice_packed_int64
+ } else {
+ p.enc = (*Buffer).enc_slice_int64
+ p.size = size_slice_int64
+ }
+ p.dec = (*Buffer).dec_slice_int64
+ p.packedDec = (*Buffer).dec_slice_packed_int64
+ default:
+ logNoSliceEnc(t1, t2)
+ break
+ }
+ case reflect.String:
+ p.enc = (*Buffer).enc_slice_string
+ p.dec = (*Buffer).dec_slice_string
+ p.size = size_slice_string
+ case reflect.Ptr:
+ switch t3 := t2.Elem(); t3.Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+ break
+ case reflect.Struct:
+ p.stype = t2.Elem()
+ p.isMarshaler = isMarshaler(t2)
+ p.isUnmarshaler = isUnmarshaler(t2)
+ if p.Wire == "bytes" {
+ p.enc = (*Buffer).enc_slice_struct_message
+ p.dec = (*Buffer).dec_slice_struct_message
+ p.size = size_slice_struct_message
+ } else {
+ p.enc = (*Buffer).enc_slice_struct_group
+ p.dec = (*Buffer).dec_slice_struct_group
+ p.size = size_slice_struct_group
+ }
+ }
+ case reflect.Slice:
+ switch t2.Elem().Kind() {
+ default:
+ fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+ break
+ case reflect.Uint8:
+ p.enc = (*Buffer).enc_slice_slice_byte
+ p.dec = (*Buffer).dec_slice_slice_byte
+ p.size = size_slice_slice_byte
+ }
+ }
+
+ case reflect.Map:
+ p.enc = (*Buffer).enc_new_map
+ p.dec = (*Buffer).dec_new_map
+ p.size = size_new_map
+
+ p.mtype = t1
+ p.mkeyprop = &Properties{}
+ p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+ p.mvalprop = &Properties{}
+ vtype := p.mtype.Elem()
+ if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+ // The value type is not a message (*T) or bytes ([]byte),
+ // so we need encoders for the pointer to this type.
+ vtype = reflect.PtrTo(vtype)
+ }
+ p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+ }
+
+ // precalculate tag code
+ wire := p.WireType
+ if p.Packed {
+ wire = WireBytes
+ }
+ x := uint32(p.Tag)<<3 | uint32(wire)
+ i := 0
+ for i = 0; x > 127; i++ {
+ p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+ x >>= 7
+ }
+ p.tagbuf[i] = uint8(x)
+ p.tagcode = p.tagbuf[0 : i+1]
+
+ if p.stype != nil {
+ if lockGetProp {
+ p.sprop = GetProperties(p.stype)
+ } else {
+ p.sprop = getPropertiesLocked(p.stype)
+ }
+ }
+}
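+
+// Worked example for the tag-code precalculation above: a field with
+// Tag 49 and wire type WireBytes has x = 49<<3|2 = 394, which varint-
+// encodes to the two bytes 0x8A 0x03; p.tagcode holds exactly those
+// bytes so encoders can copy them rather than re-encode the tag.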
+
+var (
+ marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isMarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isMarshaler")
+ }
+ return t.Implements(marshalerType)
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+ // We're checking for (likely) pointer-receiver methods
+ // so if t is not a pointer, something is very wrong.
+ // The calls above only invoke isUnmarshaler on pointer types.
+ if t.Kind() != reflect.Ptr {
+ panic("proto: misuse of isUnmarshaler")
+ }
+ return t.Implements(unmarshalerType)
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+ p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+ // "bytes,49,opt,def=hello!"
+ p.Name = name
+ p.OrigName = name
+ if f != nil {
+ p.field = toField(f)
+ }
+ if tag == "" {
+ return
+ }
+ p.Parse(tag)
+ p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+ propertiesMu sync.RWMutex
+ propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+ if t.Kind() != reflect.Struct {
+ panic("proto: type must have kind struct")
+ }
+
+ // Most calls to GetProperties in a long-running program will be
+ // retrieving details for types we have seen before.
+ propertiesMu.RLock()
+ sprop, ok := propertiesMap[t]
+ propertiesMu.RUnlock()
+ if ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return sprop
+ }
+
+ propertiesMu.Lock()
+ sprop = getPropertiesLocked(t)
+ propertiesMu.Unlock()
+ return sprop
+}
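+
+// Note the check/lock/re-check shape: between RUnlock and Lock another
+// goroutine may have built the same entry, so getPropertiesLocked must
+// consult the map again under the write lock before constructing one.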
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+ if prop, ok := propertiesMap[t]; ok {
+ if collectStats {
+ stats.Chit++
+ }
+ return prop
+ }
+ if collectStats {
+ stats.Cmiss++
+ }
+
+ prop := new(StructProperties)
+ // in case of recursive protos, fill this in now.
+ propertiesMap[t] = prop
+
+ // build properties
+ prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
+ prop.unrecField = invalidField
+ prop.Prop = make([]*Properties, t.NumField())
+ prop.order = make([]int, t.NumField())
+
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ p := new(Properties)
+ name := f.Name
+ p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+ if f.Name == "XXX_extensions" { // special case
+ p.enc = (*Buffer).enc_map
+ p.dec = nil // not needed
+ p.size = size_map
+ }
+ if f.Name == "XXX_unrecognized" { // special case
+ prop.unrecField = toField(&f)
+ }
+ oneof := f.Tag.Get("protobuf_oneof") != "" // special case
+ prop.Prop[i] = p
+ prop.order[i] = i
+ if debug {
+ print(i, " ", f.Name, " ", t.String(), " ")
+ if p.Tag > 0 {
+ print(p.String())
+ }
+ print("\n")
+ }
+ if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof {
+ fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+ }
+ }
+
+ // Re-order prop.order.
+ sort.Sort(prop)
+
+ type oneofMessage interface {
+ XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+ }
+ if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok {
+ var oots []interface{}
+ prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs()
+ prop.stype = t
+
+ // Interpret oneof metadata.
+ prop.OneofTypes = make(map[string]*OneofProperties)
+ for _, oot := range oots {
+ oop := &OneofProperties{
+ Type: reflect.ValueOf(oot).Type(), // *T
+ Prop: new(Properties),
+ }
+ sft := oop.Type.Elem().Field(0)
+ oop.Prop.Name = sft.Name
+ oop.Prop.Parse(sft.Tag.Get("protobuf"))
+ // There will be exactly one interface field that
+ // this new value is assignable to.
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Type.Kind() != reflect.Interface {
+ continue
+ }
+ if !oop.Type.AssignableTo(f.Type) {
+ continue
+ }
+ oop.Field = i
+ break
+ }
+ prop.OneofTypes[oop.Prop.OrigName] = oop
+ }
+ }
+
+ // build required counts
+ // build tags
+ reqCount := 0
+ prop.decoderOrigNames = make(map[string]int)
+ for i, p := range prop.Prop {
+ if strings.HasPrefix(p.Name, "XXX_") {
+ // Internal fields should not appear in tags/origNames maps.
+ // They are handled specially when encoding and decoding.
+ continue
+ }
+ if p.Required {
+ reqCount++
+ }
+ prop.decoderTags.put(p.Tag, i)
+ prop.decoderOrigNames[p.OrigName] = i
+ }
+ prop.reqCount = reqCount
+
+ return prop
+}
+
+// propByIndex returns the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+ if len(x) != 1 {
+ fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+ return nil
+ }
+ prop := GetProperties(t)
+ return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+ if pb == nil {
+ err = ErrNil
+ return
+ }
+ // get the reflect type of the pointer to the struct.
+ t = reflect.TypeOf(pb)
+ // get the address of the struct.
+ value := reflect.ValueOf(pb)
+ b = toStructPointer(value)
+ return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+ if _, ok := enumValueMaps[typeName]; ok {
+ panic("proto: duplicate enum registered: " + typeName)
+ }
+ enumValueMaps[typeName] = valueMap
+}
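+
+// Illustrative generated-code call (enum name and values hypothetical):
+//
+//	func init() {
+//		RegisterEnum("my.pkg.Color", map[int32]string{0: "RED", 1: "BLUE"},
+//			map[string]int32{"RED": 0, "BLUE": 1})
+//	}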
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+ return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+ protoTypes = make(map[string]reflect.Type)
+ revProtoTypes = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+ if _, ok := protoTypes[name]; ok {
+ // TODO: Some day, make this a panic.
+ log.Printf("proto: duplicate proto type registered: %s", name)
+ return
+ }
+ t := reflect.TypeOf(x)
+ protoTypes[name] = t
+ revProtoTypes[t] = name
+}
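+
+// Illustrative generated-code call (message type hypothetical):
+//
+//	func init() {
+//		RegisterType((*MyMessage)(nil), "my.pkg.MyMessage")
+//	}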
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] }
+
+// MessageType returns the message type (pointer to struct) for a named message.
+func MessageType(name string) reflect.Type { return protoTypes[name] }
diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 0000000..37c9535
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,849 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Printf("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func requiresQuotes(u string) bool {
+ // A type URL must be quoted if it contains any character outside [0-9A-Za-z._/].
+ for _, ch := range u {
+ switch {
+ case ch == '.' || ch == '/' || ch == '_':
+ continue
+ case '0' <= ch && ch <= '9':
+ continue
+ case 'A' <= ch && ch <= 'Z':
+ continue
+ case 'a' <= ch && ch <= 'z':
+ continue
+ default:
+ return true
+ }
+ }
+ return false
+}
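+
+// For example: requiresQuotes("type.googleapis.com/my.pkg.Foo") is
+// false, while requiresQuotes("type.example.com/a b") is true because
+// of the space. (Hypothetical URLs, for illustration only.)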
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+ type wkt interface {
+ XXX_WellKnownType() string
+ }
+ t, ok := sv.Addr().Interface().(wkt)
+ return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv's value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+ turl := sv.FieldByName("TypeUrl")
+ val := sv.FieldByName("Value")
+ if !turl.IsValid() || !val.IsValid() {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ b, ok := val.Interface().([]byte)
+ if !ok {
+ return true, errors.New("proto: invalid google.protobuf.Any message")
+ }
+
+ parts := strings.Split(turl.String(), "/")
+ mt := MessageType(parts[len(parts)-1])
+ if mt == nil {
+ return false, nil
+ }
+ m := reflect.New(mt.Elem())
+ if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+ return false, nil
+ }
+ w.Write([]byte("["))
+ u := turl.String()
+ if requiresQuotes(u) {
+ writeString(w, u)
+ } else {
+ w.Write([]byte(u))
+ }
+ if w.compact {
+ w.Write([]byte("]:<"))
+ } else {
+ w.Write([]byte("]: <\n"))
+ w.ind++
+ }
+ if err := tm.writeStruct(w, m.Elem()); err != nil {
+ return true, err
+ }
+ if w.compact {
+ w.Write([]byte("> "))
+ } else {
+ w.ind--
+ w.Write([]byte(">\n"))
+ }
+ return true, nil
+}
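+
+// Illustrative expanded output (URL and message hypothetical): an Any
+// whose TypeUrl resolves to a registered my.pkg.Foo renders as
+//
+//	[type.googleapis.com/my.pkg.Foo]: <
+//	  name: "x"
+//	>
+//
+// while an unresolvable type falls back to the raw type_url/value fields.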
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+ if tm.ExpandAny && isAny(sv) {
+ if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+ return err
+ }
+ }
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("<nil>\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := tm.writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // nil values aren't legal, but we can avoid panicking because of them.
+ if val.Kind() != reflect.Ptr || !val.IsNil() {
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ if isProto3Zero(fv) {
+ continue
+ }
+ }
+
+ if fv.Kind() == reflect.Interface {
+ // Check if it is a oneof.
+ if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+ // fv is nil, or holds a pointer to generated struct.
+ // That generated struct has exactly one field,
+ // which has a protobuf struct tag.
+ if fv.IsNil() {
+ continue
+ }
+ inner := fv.Elem().Elem() // interface -> *T -> T
+ tag := inner.Type().Field(0).Tag.Get("protobuf")
+ props = new(Properties) // Overwrite the outer props var, but not its pointee.
+ props.Parse(tag)
+ // Write the value in the oneof, not the oneof itself.
+ fv = inner.Field(0)
+
+ // Special case to cope with malformed messages gracefully:
+ // If the value in the oneof is a nil pointer, don't panic
+ // in writeAny.
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Use errors.New so writeAny won't render quotes.
+ msg := errors.New("/* nil */")
+ fv = reflect.ValueOf(&msg).Elem()
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := tm.writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if pv.Type().Implements(extendableProtoType) {
+ if err := tm.writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Interface().([]byte))); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := tm.writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// isprint reports whether c is a printable ASCII character, equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
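+
+// For instance (illustrative): writeString(w, "a\x00b") writes "a\000b",
+// with the NUL byte rendered as a three-digit octal escape instead of a
+// Go-style \x00.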
+
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+ if !w.compact {
+ if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+ return err
+ }
+ }
+ b := NewBuffer(data)
+ for b.index < len(b.buf) {
+ x, err := b.DecodeVarint()
+ if err != nil {
+ _, err := fmt.Fprintf(w, "/* %v */\n", err)
+ return err
+ }
+ wire, tag := x&7, x>>3
+ if wire == WireEndGroup {
+ w.unindent()
+ if _, err := w.Write(endBraceNewline); err != nil {
+ return err
+ }
+ continue
+ }
+ if _, err := fmt.Fprint(w, tag); err != nil {
+ return err
+ }
+ if wire != WireStartGroup {
+ if err := w.WriteByte(':'); err != nil {
+ return err
+ }
+ }
+ if !w.compact || wire == WireStartGroup {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ switch wire {
+ case WireBytes:
+ buf, e := b.DecodeRawBytes(false)
+ if e == nil {
+ _, err = fmt.Fprintf(w, "%q", buf)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", e)
+ }
+ case WireFixed32:
+ x, err = b.DecodeFixed32()
+ err = writeUnknownInt(w, x, err)
+ case WireFixed64:
+ x, err = b.DecodeFixed64()
+ err = writeUnknownInt(w, x, err)
+ case WireStartGroup:
+ err = w.WriteByte('{')
+ w.indent()
+ case WireVarint:
+ x, err = b.DecodeVarint()
+ err = writeUnknownInt(w, x, err)
+ default:
+ _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+ }
+ if err != nil {
+ return err
+ }
+ if err = w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ return nil
+}
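+
+// For example (illustrative): in the non-compact form, three unknown bytes
+// are introduced by a "/* 3 unknown bytes */" line, followed by a
+// best-effort decoding of each tag/value pair.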
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+ emap := extensionMaps[pv.Type().Elem()]
+ ep := pv.Interface().(extendableProto)
+
+ // Order the extensions by ID.
+ // This isn't strictly necessary, but it will give us
+ // canonical output, which will also make testing easier.
+ m := ep.ExtensionMap()
+ ids := make([]int32, 0, len(m))
+ for id := range m {
+ ids = append(ids, id)
+ }
+ sort.Sort(int32Slice(ids))
+
+ for _, extNum := range ids {
+ ext := m[extNum]
+ var desc *ExtensionDesc
+ if emap != nil {
+ desc = emap[extNum]
+ }
+ if desc == nil {
+ // Unknown extension.
+ if err := writeUnknownStruct(w, ext.enc); err != nil {
+ return err
+ }
+ continue
+ }
+
+ pb, err := GetExtension(ep, desc)
+ if err != nil {
+ return fmt.Errorf("failed getting extension: %v", err)
+ }
+
+ // Repeated extensions will appear as a slice.
+ if !desc.repeated() {
+ if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+ return err
+ }
+ } else {
+ v := reflect.ValueOf(pb)
+ for i := 0; i < v.Len(); i++ {
+ if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line).
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+ val := reflect.ValueOf(pb)
+ if pb == nil || val.IsNil() {
+ w.Write([]byte("<nil>"))
+ return nil
+ }
+ var bw *bufio.Writer
+ ww, ok := w.(writer)
+ if !ok {
+ bw = bufio.NewWriter(w)
+ ww = bw
+ }
+ aw := &textWriter{
+ w: ww,
+ complete: true,
+ compact: tm.Compact,
+ }
+
+ if etm, ok := pb.(encoding.TextMarshaler); ok {
+ text, err := etm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = aw.Write(text); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+ }
+ // Dereference the received pointer so we don't have outer < and >.
+ v := reflect.Indirect(val)
+ if err := tm.writeStruct(aw, v); err != nil {
+ return err
+ }
+ if bw != nil {
+ return bw.Flush()
+ }
+ return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+ var buf bytes.Buffer
+ tm.Marshal(&buf, pb)
+ return buf.String()
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
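+
+// Usage sketch (illustrative; pb.MyMessage and its name field are
+// hypothetical generated types):
+//
+//   m := &pb.MyMessage{}
+//   if err := proto.UnmarshalText(`name: "x"`, m); err != nil {
+//       // handle parse error
+//   }
+//   s := proto.MarshalTextString(m) // multi-line text form
+//   c := proto.CompactTextString(m) // one-line text form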
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..b5e1c8e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,871 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type ParseError struct {
+ Message string
+ Line int // 1-based line number
+ Offset int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+ if p.Line == 1 {
+ // show offset only for first line
+ return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+ }
+ return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
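+
+// For example (illustrative): a failure at byte offset 7 of the first line
+// renders as "line 1.7: ...", while a failure on line 3 renders as
+// "line 3: ..." without an offset.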
+
+type token struct {
+ value string
+ err *ParseError
+ line int // line number
+ offset int // byte number from start of input, not start of line
+ unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+ if t.err == nil {
+ return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+ }
+ return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+ s string // remaining input
+ done bool // whether the parsing is finished (success or error)
+ backed bool // whether back() was called
+ offset, line int
+ cur token
+}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+ switch {
+ case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+ return true
+ case '0' <= c && c <= '9':
+ return true
+ }
+ switch c {
+ case '-', '+', '.', '_':
+ return true
+ }
+ return false
+}
+
+func isWhitespace(c byte) bool {
+ switch c {
+ case ' ', '\t', '\n', '\r':
+ return true
+ }
+ return false
+}
+
+func isQuote(c byte) bool {
+ switch c {
+ case '"', '\'':
+ return true
+ }
+ return false
+}
+
+func (p *textParser) skipWhitespace() {
+ i := 0
+ for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+ if p.s[i] == '#' {
+ // comment; skip to end of line or input
+ for i < len(p.s) && p.s[i] != '\n' {
+ i++
+ }
+ if i == len(p.s) {
+ break
+ }
+ }
+ if p.s[i] == '\n' {
+ p.line++
+ }
+ i++
+ }
+ p.offset += i
+ p.s = p.s[i:len(p.s)]
+ if len(p.s) == 0 {
+ p.done = true
+ }
+}
+
+func (p *textParser) advance() {
+ // Skip whitespace
+ p.skipWhitespace()
+ if p.done {
+ return
+ }
+
+ // Start of non-whitespace
+ p.cur.err = nil
+ p.cur.offset, p.cur.line = p.offset, p.line
+ p.cur.unquoted = ""
+ switch p.s[0] {
+ case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+ // Single symbol
+ p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+ case '"', '\'':
+ // Quoted string
+ i := 1
+ for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+ if p.s[i] == '\\' && i+1 < len(p.s) {
+ // skip escaped char
+ i++
+ }
+ i++
+ }
+ if i >= len(p.s) || p.s[i] != p.s[0] {
+ p.errorf("unmatched quote")
+ return
+ }
+ unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+ if err != nil {
+ p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+ return
+ }
+ p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+ p.cur.unquoted = unq
+ default:
+ i := 0
+ for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+ i++
+ }
+ if i == 0 {
+ p.errorf("unexpected byte %#x", p.s[0])
+ return
+ }
+ p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+ }
+ p.offset += len(p.cur.value)
+}
+
+var (
+ errBadUTF8 = errors.New("proto: bad UTF-8")
+ errBadHex = errors.New("proto: bad hexadecimal")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+ // This is based on C++'s tokenizer.cc.
+ // Despite its name, this is *not* parsing C syntax.
+ // For instance, "\0" is an invalid quoted string.
+
+ // Avoid allocation in trivial cases.
+ simple := true
+ for _, r := range s {
+ if r == '\\' || r == quote {
+ simple = false
+ break
+ }
+ }
+ if simple {
+ return s, nil
+ }
+
+ buf := make([]byte, 0, 3*len(s)/2)
+ for len(s) > 0 {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", errBadUTF8
+ }
+ s = s[n:]
+ if r != '\\' {
+ if r < utf8.RuneSelf {
+ buf = append(buf, byte(r))
+ } else {
+ buf = append(buf, string(r)...)
+ }
+ continue
+ }
+
+ ch, tail, err := unescape(s)
+ if err != nil {
+ return "", err
+ }
+ buf = append(buf, ch...)
+ s = tail
+ }
+ return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+ r, n := utf8.DecodeRuneInString(s)
+ if r == utf8.RuneError && n == 1 {
+ return "", "", errBadUTF8
+ }
+ s = s[n:]
+ switch r {
+ case 'a':
+ return "\a", s, nil
+ case 'b':
+ return "\b", s, nil
+ case 'f':
+ return "\f", s, nil
+ case 'n':
+ return "\n", s, nil
+ case 'r':
+ return "\r", s, nil
+ case 't':
+ return "\t", s, nil
+ case 'v':
+ return "\v", s, nil
+ case '?':
+ return "?", s, nil // trigraph workaround
+ case '\'', '"', '\\':
+ return string(r), s, nil
+ case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
+ if len(s) < 2 {
+ return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+ }
+ base := 8
+ ss := s[:2]
+ s = s[2:]
+ if r == 'x' || r == 'X' {
+ base = 16
+ } else {
+ ss = string(r) + ss
+ }
+ i, err := strconv.ParseUint(ss, base, 8)
+ if err != nil {
+ return "", "", err
+ }
+ return string([]byte{byte(i)}), s, nil
+ case 'u', 'U':
+ n := 4
+ if r == 'U' {
+ n = 8
+ }
+ if len(s) < n {
+ return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
+ }
+
+ bs := make([]byte, n/2)
+ for i := 0; i < n; i += 2 {
+ a, ok1 := unhex(s[i])
+ b, ok2 := unhex(s[i+1])
+ if !ok1 || !ok2 {
+ return "", "", errBadHex
+ }
+ bs[i/2] = a<<4 | b
+ }
+ s = s[n:]
+ return string(bs), s, nil
+ }
+ return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
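+
+// Examples (illustrative): `\n` yields a newline; `\101` (octal) and `\x41`
+// (hex) both yield "A"; `\u` and `\U` consume 4 or 8 hex digits and append
+// the decoded bytes pairwise.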
+
+// Adapted from src/pkg/strconv/quote.go.
+func unhex(b byte) (v byte, ok bool) {
+ switch {
+ case '0' <= b && b <= '9':
+ return b - '0', true
+ case 'a' <= b && b <= 'f':
+ return b - 'a' + 10, true
+ case 'A' <= b && b <= 'F':
+ return b - 'A' + 10, true
+ }
+ return 0, false
+}
+
+// back backs the parser off by one token. It may only be called between
+// calls to next(); it makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// next advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+ if p.backed || p.done {
+ p.backed = false
+ return &p.cur
+ }
+ p.advance()
+ if p.done {
+ p.cur.value = ""
+ } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+ // Look for multiple quoted strings separated by whitespace,
+ // and concatenate them.
+ cat := p.cur
+ for {
+ p.skipWhitespace()
+ if p.done || !isQuote(p.s[0]) {
+ break
+ }
+ p.advance()
+ if p.cur.err != nil {
+ return &p.cur
+ }
+ cat.value += " " + p.cur.value
+ cat.unquoted += p.cur.unquoted
+ }
+ p.done = false // parser may have seen EOF, but we want to return cat
+ p.cur = cat
+ }
+ return &p.cur
+}
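+
+// For example (illustrative): the input `name: "ab" 'cd'` produces a single
+// string token whose unquoted value is "abcd", matching the C++ text-format
+// behaviour of concatenating adjacent quoted strings.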
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < st.NumField(); i++ {
+ if !isNil(sv.Field(i)) {
+ continue
+ }
+
+ props := sprops.Prop[i]
+ if props.Required {
+ return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+ }
+ }
+ return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ":" {
+ // Colon is optional when the field is a group or message.
+ needColon := true
+ switch props.Wire {
+ case "group":
+ needColon = false
+ case "bytes":
+ // A "bytes" field is either a message, a string, or a repeated field;
+ // those three become *T, *string and []T respectively, so we can check for
+ // this field being a pointer to a non-string.
+ if typ.Kind() == reflect.Ptr {
+ // *T or *string
+ if typ.Elem().Kind() == reflect.String {
+ break
+ }
+ } else if typ.Kind() == reflect.Slice {
+ // []T or []*T
+ if typ.Elem().Kind() != reflect.Ptr {
+ break
+ }
+ } else if typ.Kind() == reflect.String {
+ // The proto3 exception is for a string field,
+ // which requires a colon.
+ break
+ }
+ needColon = false
+ }
+ if needColon {
+ return p.errorf("expected ':', found %q", tok.value)
+ }
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+ st := sv.Type()
+ sprops := GetProperties(st)
+ reqCount := sprops.reqCount
+ var reqFieldErr error
+ fieldSet := make(map[string]bool)
+ // A struct is a sequence of "name: value", terminated by one of
+ // '>' or '}', or the end of the input. A name may also be
+ // "[extension]" or "[type/url]".
+ //
+ // The whole struct can also be an expanded Any message, like:
+ // [type/url] < ... struct contents ... >
+ for {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == terminator {
+ break
+ }
+ if tok.value == "[" {
+ // Looks like an extension or an Any.
+ //
+ // TODO: Check whether we need to handle
+ // namespace rooted names (e.g. ".something.Foo").
+ extName, err := p.consumeExtName()
+ if err != nil {
+ return err
+ }
+
+ if s := strings.LastIndex(extName, "/"); s >= 0 {
+ // If it contains a slash, it's an Any type URL.
+ messageName := extName[s+1:]
+ mt := MessageType(messageName)
+ if mt == nil {
+ return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+ }
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ // consume an optional colon
+ if tok.value == ":" {
+ tok = p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ }
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ v := reflect.New(mt.Elem())
+ if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+ return pe
+ }
+ b, err := Marshal(v.Interface().(Message))
+ if err != nil {
+ return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+ }
+ sv.FieldByName("TypeUrl").SetString(extName)
+ sv.FieldByName("Value").SetBytes(b)
+ continue
+ }
+
+ var desc *ExtensionDesc
+ // This could be faster, but it's functional.
+ // TODO: Do something smarter than a linear scan.
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+ if d.Name == extName {
+ desc = d
+ break
+ }
+ }
+ if desc == nil {
+ return p.errorf("unrecognized extension %q", extName)
+ }
+
+ props := &Properties{}
+ props.Parse(desc.Tag)
+
+ typ := reflect.TypeOf(desc.ExtensionType)
+ if err := p.checkForColon(props, typ); err != nil {
+ return err
+ }
+
+ rep := desc.repeated()
+
+ // Read the extension structure, and set it in
+ // the value we're constructing.
+ var ext reflect.Value
+ if !rep {
+ ext = reflect.New(typ).Elem()
+ } else {
+ ext = reflect.New(typ.Elem()).Elem()
+ }
+ if err := p.readAny(ext, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ }
+ ep := sv.Addr().Interface().(extendableProto)
+ if !rep {
+ SetExtension(ep, desc, ext.Interface())
+ } else {
+ old, err := GetExtension(ep, desc)
+ var sl reflect.Value
+ if err == nil {
+ sl = reflect.ValueOf(old) // existing slice
+ } else {
+ sl = reflect.MakeSlice(typ, 0, 1)
+ }
+ sl = reflect.Append(sl, ext)
+ SetExtension(ep, desc, sl.Interface())
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // This is a normal, non-extension field.
+ name := tok.value
+ var dst reflect.Value
+ fi, props, ok := structFieldByName(sprops, name)
+ if ok {
+ dst = sv.Field(fi)
+ } else if oop, ok := sprops.OneofTypes[name]; ok {
+ // It is a oneof.
+ props = oop.Prop
+ nv := reflect.New(oop.Type.Elem())
+ dst = nv.Elem().Field(0)
+ sv.Field(oop.Field).Set(nv)
+ }
+ if !dst.IsValid() {
+ return p.errorf("unknown field name %q in %v", name, st)
+ }
+
+ if dst.Kind() == reflect.Map {
+ // Consume any colon.
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Construct the map if it doesn't already exist.
+ if dst.IsNil() {
+ dst.Set(reflect.MakeMap(dst.Type()))
+ }
+ key := reflect.New(dst.Type().Key()).Elem()
+ val := reflect.New(dst.Type().Elem()).Elem()
+
+ // The map entry should be this sequence of tokens:
+ // < key : KEY value : VALUE >
+ // Technically the "key" and "value" could come in any order,
+ // but in practice they won't.
+
+ tok := p.next()
+ var terminator string
+ switch tok.value {
+ case "<":
+ terminator = ">"
+ case "{":
+ terminator = "}"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ if err := p.consumeToken("key"); err != nil {
+ return err
+ }
+ if err := p.consumeToken(":"); err != nil {
+ return err
+ }
+ if err := p.readAny(key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken("value"); err != nil {
+ return err
+ }
+ if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil {
+ return err
+ }
+ if err := p.readAny(val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+ if err := p.consumeToken(terminator); err != nil {
+ return err
+ }
+
+ dst.SetMapIndex(key, val)
+ continue
+ }
+
+ // Check that it's not already set if it's not a repeated field.
+ if !props.Repeated && fieldSet[name] {
+ return p.errorf("non-repeated field %q was repeated", name)
+ }
+
+ if err := p.checkForColon(props, dst.Type()); err != nil {
+ return err
+ }
+
+ // Parse into the field.
+ fieldSet[name] = true
+ if err := p.readAny(dst, props); err != nil {
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ return err
+ }
+ reqFieldErr = err
+ } else if props.Required {
+ reqCount--
+ }
+
+ if err := p.consumeOptionalSeparator(); err != nil {
+ return err
+ }
+
+ }
+
+ if reqCount > 0 {
+ return p.missingRequiredFieldError(sv)
+ }
+ return reqFieldErr
+}
+
+// consumeExtName consumes an extension name or an expanded Any type URL and
+// the following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+ tok := p.next()
+ if tok.err != nil {
+ return "", tok.err
+ }
+
+ // If the extension name or type URL is quoted, it's a single token.
+ if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+ name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+ if err != nil {
+ return "", err
+ }
+ return name, p.consumeToken("]")
+ }
+
+ // Consume everything up to "]"
+ var parts []string
+ for tok.value != "]" {
+ parts = append(parts, tok.value)
+ tok = p.next()
+ if tok.err != nil {
+ return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+ }
+ }
+ return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
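+
+// For example (illustrative): `foo: 1; bar: 2`, `foo: 1, bar: 2` and the
+// plain `foo: 1 bar: 2` are all accepted.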
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value)
+ }
+ bytes := []byte(tok.unquoted)
+ fv.Set(reflect.ValueOf(bytes))
+ return nil
+ }
+ // Repeated field.
+ if tok.value == "[" {
+ // Repeated field with list notation, like [1,2,3].
+ for {
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ err := p.readAny(fv.Index(fv.Len()-1), props)
+ if err != nil {
+ return err
+ }
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "]" {
+ break
+ }
+ if tok.value != "," {
+ return p.errorf("Expected ']' or ',' found %q", tok.value)
+ }
+ }
+ return nil
+ }
+ // One value of the repeated field.
+ p.back()
+ fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+ return p.readAny(fv.Index(fv.Len()-1), props)
+ case reflect.Bool:
+ // Either "true", "false", 1 or 0.
+ switch tok.value {
+ case "true", "1":
+ fv.SetBool(true)
+ return nil
+ case "false", "0":
+ fv.SetBool(false)
+ return nil
+ }
+ case reflect.Float32, reflect.Float64:
+ v := tok.value
+ // Ignore 'f' for compatibility with output generated by C++, but don't
+ // remove 'f' when the value is "-inf" or "inf".
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+ v = v[:len(v)-1]
+ }
+ if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+ fv.SetFloat(f)
+ return nil
+ }
+ case reflect.Int32:
+ if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ if len(props.Enum) == 0 {
+ break
+ }
+ m, ok := enumValueMaps[props.Enum]
+ if !ok {
+ break
+ }
+ x, ok := m[tok.value]
+ if !ok {
+ break
+ }
+ fv.SetInt(int64(x))
+ return nil
+ case reflect.Int64:
+ if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+ fv.SetInt(x)
+ return nil
+ }
+
+ case reflect.Ptr:
+ // A basic field (indirected through pointer), or a repeated message/group
+ p.back()
+ fv.Set(reflect.New(fv.Type().Elem()))
+ return p.readAny(fv.Elem(), props)
+ case reflect.String:
+ if tok.value[0] == '"' || tok.value[0] == '\'' {
+ fv.SetString(tok.unquoted)
+ return nil
+ }
+ case reflect.Struct:
+ var terminator string
+ switch tok.value {
+ case "{":
+ terminator = "}"
+ case "<":
+ terminator = ">"
+ default:
+ return p.errorf("expected '{' or '<', found %q", tok.value)
+ }
+ // TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+ return p.readStruct(fv, terminator)
+ case reflect.Uint32:
+ if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+ fv.SetUint(uint64(x))
+ return nil
+ }
+ case reflect.Uint64:
+ if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+ fv.SetUint(x)
+ return nil
+ }
+ }
+ return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE
new file mode 100644
index 0000000..b03310a
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2015 James Saryerwinnie
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile
new file mode 100644
index 0000000..a828d28
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/Makefile
@@ -0,0 +1,44 @@
+
+CMD = jpgo
+
+help:
+ @echo "Please use \`make <target>' where <target> is one of"
+ @echo " test to run all the tests"
+ @echo " build to build the library and jp executable"
+ @echo " generate to run codegen"
+
+
+generate:
+ go generate ./...
+
+build:
+ rm -f $(CMD)
+ go build ./...
+ rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
+ mv cmd/$(CMD)/$(CMD) .
+
+test:
+ go test -v ./...
+
+check:
+ go vet ./...
+ @echo "golint ./..."
+ @lint=`golint ./...`; \
+ lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
+ echo "$$lint"; \
+ if [ "$$lint" != "" ]; then exit 1; fi
+
+htmlc:
+ go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
+
+buildfuzz:
+ go-fuzz-build github.com/jmespath/go-jmespath/fuzz
+
+fuzz: buildfuzz
+ go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
+
+bench:
+ go test -bench . -cpuprofile cpu.out
+
+pprof-cpu:
+ go tool pprof ./go-jmespath.test ./cpu.out
diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md
new file mode 100644
index 0000000..187ef67
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/README.md
@@ -0,0 +1,7 @@
+# go-jmespath - A JMESPath implementation in Go
+
+[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath)
+
+
+
+See http://jmespath.org for more info.
diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go
new file mode 100644
index 0000000..9cfa988
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/api.go
@@ -0,0 +1,49 @@
+package jmespath
+
+import "strconv"
+
+// JMESPath is the representation of a compiled JMESPath query. A JMESPath is
+// safe for concurrent use by multiple goroutines.
+type JMESPath struct {
+ ast ASTNode
+ intr *treeInterpreter
+}
+
+// Compile parses a JMESPath expression and returns, if successful, a JMESPath
+// object that can be used to match against data.
+func Compile(expression string) (*JMESPath, error) {
+ parser := NewParser()
+ ast, err := parser.Parse(expression)
+ if err != nil {
+ return nil, err
+ }
+ jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
+ return jmespath, nil
+}
+
+// MustCompile is like Compile but panics if the expression cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled
+// JMESPaths.
+func MustCompile(expression string) *JMESPath {
+ jmespath, err := Compile(expression)
+ if err != nil {
+ panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
+ }
+ return jmespath
+}
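+
+// Usage sketch (illustrative): compile once, then search repeatedly.
+//
+//   var namesPath = jmespath.MustCompile("people[?age > `18`].name")
+//   names, err := namesPath.Search(data) // data is JSON decoded into interface{}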
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
+ return jp.intr.Execute(jp.ast, data)
+}
+
+// Search evaluates a JMESPath expression against input data and returns the result.
+func Search(expression string, data interface{}) (interface{}, error) {
+ intr := newInterpreter()
+ parser := NewParser()
+ ast, err := parser.Parse(expression)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(ast, data)
+}
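+
+// For example (illustrative): with data decoded from the JSON document
+// {"foo": {"bar": [0, 1, 2]}}, Search("foo.bar[1]", data) returns
+// float64(1), since encoding/json decodes numbers as float64.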
diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
new file mode 100644
index 0000000..1cd2d23
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type astNodeType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
+
+var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
+
+func (i astNodeType) String() string {
+ if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
+ return fmt.Sprintf("astNodeType(%d)", i)
+ }
+ return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go
new file mode 100644
index 0000000..9b7cd89
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/functions.go
@@ -0,0 +1,842 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type jpFunction func(arguments []interface{}) (interface{}, error)
+
+type jpType string
+
+const (
+ jpUnknown jpType = "unknown"
+ jpNumber jpType = "number"
+ jpString jpType = "string"
+ jpArray jpType = "array"
+ jpObject jpType = "object"
+ jpArrayNumber jpType = "array[number]"
+ jpArrayString jpType = "array[string]"
+ jpExpref jpType = "expref"
+ jpAny jpType = "any"
+)
+
+type functionEntry struct {
+ name string
+ arguments []argSpec
+ handler jpFunction
+ hasExpRef bool
+}
+
+type argSpec struct {
+ types []jpType
+ variadic bool
+}
+
+type byExprString struct {
+ intr *treeInterpreter
+ node ASTNode
+ items []interface{}
+ hasError bool
+}
+
+func (a *byExprString) Len() int {
+ return len(a.items)
+}
+func (a *byExprString) Swap(i, j int) {
+ a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprString) Less(i, j int) bool {
+ first, err := a.intr.Execute(a.node, a.items[i])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ ith, ok := first.(string)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ second, err := a.intr.Execute(a.node, a.items[j])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ jth, ok := second.(string)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ return ith < jth
+}
+
+type byExprFloat struct {
+ intr *treeInterpreter
+ node ASTNode
+ items []interface{}
+ hasError bool
+}
+
+func (a *byExprFloat) Len() int {
+ return len(a.items)
+}
+func (a *byExprFloat) Swap(i, j int) {
+ a.items[i], a.items[j] = a.items[j], a.items[i]
+}
+func (a *byExprFloat) Less(i, j int) bool {
+ first, err := a.intr.Execute(a.node, a.items[i])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ ith, ok := first.(float64)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ second, err := a.intr.Execute(a.node, a.items[j])
+ if err != nil {
+ a.hasError = true
+ // Return a dummy value.
+ return true
+ }
+ jth, ok := second.(float64)
+ if !ok {
+ a.hasError = true
+ return true
+ }
+ return ith < jth
+}
+
+type functionCaller struct {
+ functionTable map[string]functionEntry
+}
+
+func newFunctionCaller() *functionCaller {
+ caller := &functionCaller{}
+ caller.functionTable = map[string]functionEntry{
+ "length": {
+ name: "length",
+ arguments: []argSpec{
+ {types: []jpType{jpString, jpArray, jpObject}},
+ },
+ handler: jpfLength,
+ },
+ "starts_with": {
+ name: "starts_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfStartsWith,
+ },
+ "abs": {
+ name: "abs",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfAbs,
+ },
+ "avg": {
+ name: "avg",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfAvg,
+ },
+ "ceil": {
+ name: "ceil",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfCeil,
+ },
+ "contains": {
+ name: "contains",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfContains,
+ },
+ "ends_with": {
+ name: "ends_with",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpString}},
+ },
+ handler: jpfEndsWith,
+ },
+ "floor": {
+ name: "floor",
+ arguments: []argSpec{
+ {types: []jpType{jpNumber}},
+ },
+ handler: jpfFloor,
+ },
+ "map": {
+ name: "amp",
+ arguments: []argSpec{
+ {types: []jpType{jpExpref}},
+ {types: []jpType{jpArray}},
+ },
+ handler: jpfMap,
+ hasExpRef: true,
+ },
+ "max": {
+ name: "max",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMax,
+ },
+ "merge": {
+ name: "merge",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}, variadic: true},
+ },
+ handler: jpfMerge,
+ },
+ "max_by": {
+ name: "max_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMaxBy,
+ hasExpRef: true,
+ },
+ "sum": {
+ name: "sum",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber}},
+ },
+ handler: jpfSum,
+ },
+ "min": {
+ name: "min",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayNumber, jpArrayString}},
+ },
+ handler: jpfMin,
+ },
+ "min_by": {
+ name: "min_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfMinBy,
+ hasExpRef: true,
+ },
+ "type": {
+ name: "type",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfType,
+ },
+ "keys": {
+ name: "keys",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfKeys,
+ },
+ "values": {
+ name: "values",
+ arguments: []argSpec{
+ {types: []jpType{jpObject}},
+ },
+ handler: jpfValues,
+ },
+ "sort": {
+ name: "sort",
+ arguments: []argSpec{
+ {types: []jpType{jpArrayString, jpArrayNumber}},
+ },
+ handler: jpfSort,
+ },
+ "sort_by": {
+ name: "sort_by",
+ arguments: []argSpec{
+ {types: []jpType{jpArray}},
+ {types: []jpType{jpExpref}},
+ },
+ handler: jpfSortBy,
+ hasExpRef: true,
+ },
+ "join": {
+ name: "join",
+ arguments: []argSpec{
+ {types: []jpType{jpString}},
+ {types: []jpType{jpArrayString}},
+ },
+ handler: jpfJoin,
+ },
+ "reverse": {
+ name: "reverse",
+ arguments: []argSpec{
+ {types: []jpType{jpArray, jpString}},
+ },
+ handler: jpfReverse,
+ },
+ "to_array": {
+ name: "to_array",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToArray,
+ },
+ "to_string": {
+ name: "to_string",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToString,
+ },
+ "to_number": {
+ name: "to_number",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}},
+ },
+ handler: jpfToNumber,
+ },
+ "not_null": {
+ name: "not_null",
+ arguments: []argSpec{
+ {types: []jpType{jpAny}, variadic: true},
+ },
+ handler: jpfNotNull,
+ },
+ }
+ return caller
+}
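+
+// For example (illustrative): the "sort_by" entry above backs an expression
+// such as sort_by(people, &age). The &age argument is an expression
+// reference (jpExpref), so hasExpRef makes CallFunction prepend the
+// interpreter itself to the arguments before invoking jpfSortBy.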
+
+func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
+ if len(e.arguments) == 0 {
+ return arguments, nil
+ }
+ if !e.arguments[len(e.arguments)-1].variadic {
+ if len(e.arguments) != len(arguments) {
+ return nil, errors.New("incorrect number of args")
+ }
+ for i, spec := range e.arguments {
+ userArg := arguments[i]
+ err := spec.typeCheck(userArg)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arguments, nil
+ }
+ if len(arguments) < len(e.arguments) {
+ return nil, errors.New("Invalid arity.")
+ }
+ return arguments, nil
+}
+
+func (a *argSpec) typeCheck(arg interface{}) error {
+ for _, t := range a.types {
+ switch t {
+ case jpNumber:
+ if _, ok := arg.(float64); ok {
+ return nil
+ }
+ case jpString:
+ if _, ok := arg.(string); ok {
+ return nil
+ }
+ case jpArray:
+ if isSliceType(arg) {
+ return nil
+ }
+ case jpObject:
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil
+ }
+ case jpArrayNumber:
+ if _, ok := toArrayNum(arg); ok {
+ return nil
+ }
+ case jpArrayString:
+ if _, ok := toArrayStr(arg); ok {
+ return nil
+ }
+ case jpAny:
+ return nil
+ case jpExpref:
+ if _, ok := arg.(expRef); ok {
+ return nil
+ }
+ }
+ }
+ return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
+}
+
+func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
+ entry, ok := f.functionTable[name]
+ if !ok {
+ return nil, errors.New("unknown function: " + name)
+ }
+ resolvedArgs, err := entry.resolveArgs(arguments)
+ if err != nil {
+ return nil, err
+ }
+ if entry.hasExpRef {
+ var extra []interface{}
+ extra = append(extra, intr)
+ resolvedArgs = append(extra, resolvedArgs...)
+ }
+ return entry.handler(resolvedArgs)
+}
+
+func jpfAbs(arguments []interface{}) (interface{}, error) {
+ num := arguments[0].(float64)
+ return math.Abs(num), nil
+}
+
+func jpfLength(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if c, ok := arg.(string); ok {
+ return float64(utf8.RuneCountInString(c)), nil
+ } else if isSliceType(arg) {
+ v := reflect.ValueOf(arg)
+ return float64(v.Len()), nil
+ } else if c, ok := arg.(map[string]interface{}); ok {
+ return float64(len(c)), nil
+ }
+ return nil, errors.New("could not compute length()")
+}
+
+func jpfStartsWith(arguments []interface{}) (interface{}, error) {
+ search := arguments[0].(string)
+ prefix := arguments[1].(string)
+ return strings.HasPrefix(search, prefix), nil
+}
+
+func jpfAvg(arguments []interface{}) (interface{}, error) {
+ // We've already type-checked the value, so we can safely use
+ // type assertions.
+ args := arguments[0].([]interface{})
+ length := float64(len(args))
+ numerator := 0.0
+ for _, n := range args {
+ numerator += n.(float64)
+ }
+ return numerator / length, nil
+}
+func jpfCeil(arguments []interface{}) (interface{}, error) {
+ val := arguments[0].(float64)
+ return math.Ceil(val), nil
+}
+func jpfContains(arguments []interface{}) (interface{}, error) {
+ search := arguments[0]
+ el := arguments[1]
+ if searchStr, ok := search.(string); ok {
+ if elStr, ok := el.(string); ok {
+ return strings.Index(searchStr, elStr) != -1, nil
+ }
+ return false, nil
+ }
+ // Otherwise this is a generic contains for []interface{}
+ general := search.([]interface{})
+ for _, item := range general {
+ if item == el {
+ return true, nil
+ }
+ }
+ return false, nil
+}
+func jpfEndsWith(arguments []interface{}) (interface{}, error) {
+ search := arguments[0].(string)
+ suffix := arguments[1].(string)
+ return strings.HasSuffix(search, suffix), nil
+}
+func jpfFloor(arguments []interface{}) (interface{}, error) {
+ val := arguments[0].(float64)
+ return math.Floor(val), nil
+}
+func jpfMap(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ exp := arguments[1].(expRef)
+ node := exp.ref
+ arr := arguments[2].([]interface{})
+ mapped := make([]interface{}, 0, len(arr))
+ for _, value := range arr {
+ current, err := intr.Execute(node, value)
+ if err != nil {
+ return nil, err
+ }
+ mapped = append(mapped, current)
+ }
+ return mapped, nil
+}
+func jpfMax(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ // Otherwise we're dealing with a max() of strings.
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item > best {
+ best = item
+ }
+ }
+ return best, nil
+}
+func jpfMerge(arguments []interface{}) (interface{}, error) {
+ final := make(map[string]interface{})
+ for _, m := range arguments {
+ mapped := m.(map[string]interface{})
+ for key, value := range mapped {
+ final[key] = value
+ }
+ }
+ return final, nil
+}
+func jpfMaxBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ switch t := start.(type) {
+ case float64:
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current > bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ case string:
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current > bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ default:
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfSum(arguments []interface{}) (interface{}, error) {
+ items, _ := toArrayNum(arguments[0])
+ sum := 0.0
+ for _, item := range items {
+ sum += item
+ }
+ return sum, nil
+}
+
+func jpfMin(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+ }
+ items, _ := toArrayStr(arguments[0])
+ if len(items) == 0 {
+ return nil, nil
+ }
+ if len(items) == 1 {
+ return items[0], nil
+ }
+ best := items[0]
+ for _, item := range items[1:] {
+ if item < best {
+ best = item
+ }
+ }
+ return best, nil
+}
+
+func jpfMinBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return nil, nil
+ } else if len(arr) == 1 {
+ return arr[0], nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if t, ok := start.(float64); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(float64)
+ if !ok {
+ return nil, errors.New("invalid type, must be number")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else if t, ok := start.(string); ok {
+ bestVal := t
+ bestItem := arr[0]
+ for _, item := range arr[1:] {
+ result, err := intr.Execute(node, item)
+ if err != nil {
+ return nil, err
+ }
+ current, ok := result.(string)
+ if !ok {
+ return nil, errors.New("invalid type, must be string")
+ }
+ if current < bestVal {
+ bestVal = current
+ bestItem = item
+ }
+ }
+ return bestItem, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfType(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if _, ok := arg.(float64); ok {
+ return "number", nil
+ }
+ if _, ok := arg.(string); ok {
+ return "string", nil
+ }
+ if _, ok := arg.([]interface{}); ok {
+ return "array", nil
+ }
+ if _, ok := arg.(map[string]interface{}); ok {
+ return "object", nil
+ }
+ if arg == nil {
+ return "null", nil
+ }
+ if arg == true || arg == false {
+ return "boolean", nil
+ }
+ return nil, errors.New("unknown type")
+}
+func jpfKeys(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0].(map[string]interface{})
+ collected := make([]interface{}, 0, len(arg))
+ for key := range arg {
+ collected = append(collected, key)
+ }
+ return collected, nil
+}
+func jpfValues(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0].(map[string]interface{})
+ collected := make([]interface{}, 0, len(arg))
+ for _, value := range arg {
+ collected = append(collected, value)
+ }
+ return collected, nil
+}
+func jpfSort(arguments []interface{}) (interface{}, error) {
+ if items, ok := toArrayNum(arguments[0]); ok {
+ d := sort.Float64Slice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+ }
+ // Otherwise we're dealing with a sort() of strings.
+ items, _ := toArrayStr(arguments[0])
+ d := sort.StringSlice(items)
+ sort.Stable(d)
+ final := make([]interface{}, len(d))
+ for i, val := range d {
+ final[i] = val
+ }
+ return final, nil
+}
+func jpfSortBy(arguments []interface{}) (interface{}, error) {
+ intr := arguments[0].(*treeInterpreter)
+ arr := arguments[1].([]interface{})
+ exp := arguments[2].(expRef)
+ node := exp.ref
+ if len(arr) == 0 {
+ return arr, nil
+ } else if len(arr) == 1 {
+ return arr, nil
+ }
+ start, err := intr.Execute(node, arr[0])
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := start.(float64); ok {
+ sortable := &byExprFloat{intr, node, arr, false}
+ sort.Stable(sortable)
+ if sortable.hasError {
+ return nil, errors.New("error in sort_by comparison")
+ }
+ return arr, nil
+ } else if _, ok := start.(string); ok {
+ sortable := &byExprString{intr, node, arr, false}
+ sort.Stable(sortable)
+ if sortable.hasError {
+ return nil, errors.New("error in sort_by comparison")
+ }
+ return arr, nil
+ } else {
+ return nil, errors.New("invalid type, must be number of string")
+ }
+}
+func jpfJoin(arguments []interface{}) (interface{}, error) {
+ sep := arguments[0].(string)
+ // We can't just do arguments[1].([]string), we have to
+ // manually convert each item to a string.
+ arrayStr := []string{}
+ for _, item := range arguments[1].([]interface{}) {
+ arrayStr = append(arrayStr, item.(string))
+ }
+ return strings.Join(arrayStr, sep), nil
+}
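+// For example (a sketch; arguments arrive pre-resolved from the function
+// caller, as elsewhere in this file):
+//
+//	out, _ := jpfJoin([]interface{}{", ", []interface{}{"a", "b"}})
+//	// out == "a, b"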
+func jpfReverse(arguments []interface{}) (interface{}, error) {
+ if s, ok := arguments[0].(string); ok {
+ r := []rune(s)
+ for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 {
+ r[i], r[j] = r[j], r[i]
+ }
+ return string(r), nil
+ }
+ items := arguments[0].([]interface{})
+ length := len(items)
+ reversed := make([]interface{}, length)
+ for i, item := range items {
+ reversed[length-(i+1)] = item
+ }
+ return reversed, nil
+}
+func jpfToArray(arguments []interface{}) (interface{}, error) {
+ if _, ok := arguments[0].([]interface{}); ok {
+ return arguments[0], nil
+ }
+ // Otherwise wrap the single argument in a one-element array.
+ return arguments[:1:1], nil
+}
+func jpfToString(arguments []interface{}) (interface{}, error) {
+ if v, ok := arguments[0].(string); ok {
+ return v, nil
+ }
+ result, err := json.Marshal(arguments[0])
+ if err != nil {
+ return nil, err
+ }
+ return string(result), nil
+}
+func jpfToNumber(arguments []interface{}) (interface{}, error) {
+ arg := arguments[0]
+ if v, ok := arg.(float64); ok {
+ return v, nil
+ }
+ if v, ok := arg.(string); ok {
+ conv, err := strconv.ParseFloat(v, 64)
+ if err != nil {
+ return nil, nil
+ }
+ return conv, nil
+ }
+ if _, ok := arg.([]interface{}); ok {
+ return nil, nil
+ }
+ if _, ok := arg.(map[string]interface{}); ok {
+ return nil, nil
+ }
+ if arg == nil {
+ return nil, nil
+ }
+ if _, ok := arg.(bool); ok {
+ return nil, nil
+ }
+ return nil, errors.New("unknown type")
+}
+func jpfNotNull(arguments []interface{}) (interface{}, error) {
+ for _, arg := range arguments {
+ if arg != nil {
+ return arg, nil
+ }
+ }
+ return nil, nil
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go
new file mode 100644
index 0000000..13c7460
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go
@@ -0,0 +1,418 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+/* This is a tree based interpreter. It walks the AST and directly
+ interprets the AST to search through a JSON document.
+*/
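+
+// A minimal usage sketch (hypothetical input data; NewParser and
+// newInterpreter are the names defined in this vendored package):
+//
+//	ast, _ := NewParser().Parse("foo.bar")
+//	result, _ := newInterpreter().Execute(ast, map[string]interface{}{
+//		"foo": map[string]interface{}{"bar": 42.0},
+//	})
+//	// result == 42.0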
+
+type treeInterpreter struct {
+ fCall *functionCaller
+}
+
+func newInterpreter() *treeInterpreter {
+ interpreter := treeInterpreter{}
+ interpreter.fCall = newFunctionCaller()
+ return &interpreter
+}
+
+type expRef struct {
+ ref ASTNode
+}
+
+// Execute takes an ASTNode and input data and interprets the AST directly.
+// It will produce the result of applying the JMESPath expression associated
+// with the ASTNode to the input data "value".
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) {
+ switch node.nodeType {
+ case ASTComparator:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ right, err := intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ switch node.value {
+ case tEQ:
+ return objsEqual(left, right), nil
+ case tNE:
+ return !objsEqual(left, right), nil
+ }
+ leftNum, ok := left.(float64)
+ if !ok {
+ return nil, nil
+ }
+ rightNum, ok := right.(float64)
+ if !ok {
+ return nil, nil
+ }
+ switch node.value {
+ case tGT:
+ return leftNum > rightNum, nil
+ case tGTE:
+ return leftNum >= rightNum, nil
+ case tLT:
+ return leftNum < rightNum, nil
+ case tLTE:
+ return leftNum <= rightNum, nil
+ }
+ case ASTExpRef:
+ return expRef{ref: node.children[0]}, nil
+ case ASTFunctionExpression:
+ resolvedArgs := []interface{}{}
+ for _, arg := range node.children {
+ current, err := intr.Execute(arg, value)
+ if err != nil {
+ return nil, err
+ }
+ resolvedArgs = append(resolvedArgs, current)
+ }
+ return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr)
+ case ASTField:
+ if m, ok := value.(map[string]interface{}); ok {
+ key := node.value.(string)
+ return m[key], nil
+ }
+ return intr.fieldFromStruct(node.value.(string), value)
+ case ASTFilterProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.filterProjectionWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ for _, element := range sliceType {
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+ case ASTFlatten:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ // If we can't type convert to []interface{}, there's
+ // a chance this could still work via reflection if we're
+ // dealing with user provided types.
+ if isSliceType(left) {
+ return intr.flattenWithReflection(left)
+ }
+ return nil, nil
+ }
+ flattened := []interface{}{}
+ for _, element := range sliceType {
+ if elementSlice, ok := element.([]interface{}); ok {
+ flattened = append(flattened, elementSlice...)
+ } else if isSliceType(element) {
+ reflectFlat := []interface{}{}
+ v := reflect.ValueOf(element)
+ for i := 0; i < v.Len(); i++ {
+ reflectFlat = append(reflectFlat, v.Index(i).Interface())
+ }
+ flattened = append(flattened, reflectFlat...)
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+ case ASTIdentity, ASTCurrentNode:
+ return value, nil
+ case ASTIndex:
+ if sliceType, ok := value.([]interface{}); ok {
+ index := node.value.(int)
+ if index < 0 {
+ index += len(sliceType)
+ }
+ if index < len(sliceType) && index >= 0 {
+ return sliceType[index], nil
+ }
+ return nil, nil
+ }
+ // Otherwise try via reflection.
+ rv := reflect.ValueOf(value)
+ if rv.Kind() == reflect.Slice {
+ index := node.value.(int)
+ if index < 0 {
+ index += rv.Len()
+ }
+ if index < rv.Len() && index >= 0 {
+ v := rv.Index(index)
+ return v.Interface(), nil
+ }
+ }
+ return nil, nil
+ case ASTKeyValPair:
+ return intr.Execute(node.children[0], value)
+ case ASTLiteral:
+ return node.value, nil
+ case ASTMultiSelectHash:
+ if value == nil {
+ return nil, nil
+ }
+ collected := make(map[string]interface{})
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ key := child.value.(string)
+ collected[key] = current
+ }
+ return collected, nil
+ case ASTMultiSelectList:
+ if value == nil {
+ return nil, nil
+ }
+ collected := []interface{}{}
+ for _, child := range node.children {
+ current, err := intr.Execute(child, value)
+ if err != nil {
+ return nil, err
+ }
+ collected = append(collected, current)
+ }
+ return collected, nil
+ case ASTOrExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ matched, err = intr.Execute(node.children[1], value)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return matched, nil
+ case ASTAndExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return matched, nil
+ }
+ return intr.Execute(node.children[1], value)
+ case ASTNotExpression:
+ matched, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ if isFalse(matched) {
+ return true, nil
+ }
+ return false, nil
+ case ASTPipe:
+ result := value
+ var err error
+ for _, child := range node.children {
+ result, err = intr.Execute(child, result)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return result, nil
+ case ASTProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ sliceType, ok := left.([]interface{})
+ if !ok {
+ if isSliceType(left) {
+ return intr.projectWithReflection(node, left)
+ }
+ return nil, nil
+ }
+ collected := []interface{}{}
+ var current interface{}
+ for _, element := range sliceType {
+ current, err = intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ case ASTSubexpression, ASTIndexExpression:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, err
+ }
+ return intr.Execute(node.children[1], left)
+ case ASTSlice:
+ sliceType, ok := value.([]interface{})
+ if !ok {
+ if isSliceType(value) {
+ return intr.sliceWithReflection(node, value)
+ }
+ return nil, nil
+ }
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ return slice(sliceType, sliceParams)
+ case ASTValueProjection:
+ left, err := intr.Execute(node.children[0], value)
+ if err != nil {
+ return nil, nil
+ }
+ mapType, ok := left.(map[string]interface{})
+ if !ok {
+ return nil, nil
+ }
+ values := make([]interface{}, 0, len(mapType))
+ for _, value := range mapType {
+ values = append(values, value)
+ }
+ collected := []interface{}{}
+ for _, element := range values {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ return collected, nil
+ }
+ return nil, errors.New("Unknown AST node: " + node.nodeType.String())
+}
+
+func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
+ rv := reflect.ValueOf(value)
+ first, n := utf8.DecodeRuneInString(key)
+ fieldName := string(unicode.ToUpper(first)) + key[n:]
+ if rv.Kind() == reflect.Struct {
+ v := rv.FieldByName(fieldName)
+ if !v.IsValid() {
+ return nil, nil
+ }
+ return v.Interface(), nil
+ } else if rv.Kind() == reflect.Ptr {
+ // Handle multiple levels of indirection?
+ if rv.IsNil() {
+ return nil, nil
+ }
+ rv = rv.Elem()
+ v := rv.FieldByName(fieldName)
+ if !v.IsValid() {
+ return nil, nil
+ }
+ return v.Interface(), nil
+ }
+ return nil, nil
+}
+
+func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ flattened := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ if reflect.TypeOf(element).Kind() == reflect.Slice {
+ // Then insert the contents of the element
+ // slice into the flattened slice,
+ // i.e. flattened = append(flattened, mySlice...)
+ elementV := reflect.ValueOf(element)
+ for j := 0; j < elementV.Len(); j++ {
+ flattened = append(
+ flattened, elementV.Index(j).Interface())
+ }
+ } else {
+ flattened = append(flattened, element)
+ }
+ }
+ return flattened, nil
+}
+
+func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ v := reflect.ValueOf(value)
+ parts := node.value.([]*int)
+ sliceParams := make([]sliceParam, 3)
+ for i, part := range parts {
+ if part != nil {
+ sliceParams[i].Specified = true
+ sliceParams[i].N = *part
+ }
+ }
+ final := []interface{}{}
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ final = append(final, element)
+ }
+ return slice(final, sliceParams)
+}
+
+func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ compareNode := node.children[2]
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(compareNode, element)
+ if err != nil {
+ return nil, err
+ }
+ if !isFalse(result) {
+ current, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if current != nil {
+ collected = append(collected, current)
+ }
+ }
+ }
+ return collected, nil
+}
+
+func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
+ collected := []interface{}{}
+ v := reflect.ValueOf(value)
+ for i := 0; i < v.Len(); i++ {
+ element := v.Index(i).Interface()
+ result, err := intr.Execute(node.children[1], element)
+ if err != nil {
+ return nil, err
+ }
+ if result != nil {
+ collected = append(collected, result)
+ }
+ }
+ return collected, nil
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go
new file mode 100644
index 0000000..817900c
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/lexer.go
@@ -0,0 +1,420 @@
+package jmespath
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+type token struct {
+ tokenType tokType
+ value string
+ position int
+ length int
+}
+
+type tokType int
+
+const eof = -1
+
+// Lexer contains information about the expression being tokenized.
+type Lexer struct {
+ expression string // The expression provided by the user.
+ currentPos int // The current position in the string.
+ lastWidth int // The width of the current rune.
+ buf bytes.Buffer // Internal buffer used for building up values.
+}
+
+// SyntaxError is the main error used whenever a lexing or parsing error occurs.
+type SyntaxError struct {
+ msg string // Error message displayed to user
+ Expression string // Expression that generated a SyntaxError
+ Offset int // The location in the string where the error occurred
+}
+
+func (e SyntaxError) Error() string {
+ // In the future, it would be good to underline the specific
+ // location where the error occurred.
+ return "SyntaxError: " + e.msg
+}
+
+// HighlightLocation will show where the syntax error occurred.
+// It will place a "^" character on a line below the expression
+// at the point where the syntax error occurred.
+func (e SyntaxError) HighlightLocation() string {
+ return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
+}
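+
+// For example, with Expression "foo]" and Offset 3, HighlightLocation
+// returns:
+//
+//	foo]
+//	   ^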
+
+//go:generate stringer -type=tokType
+const (
+ tUnknown tokType = iota
+ tStar
+ tDot
+ tFilter
+ tFlatten
+ tLparen
+ tRparen
+ tLbracket
+ tRbracket
+ tLbrace
+ tRbrace
+ tOr
+ tPipe
+ tNumber
+ tUnquotedIdentifier
+ tQuotedIdentifier
+ tComma
+ tColon
+ tLT
+ tLTE
+ tGT
+ tGTE
+ tEQ
+ tNE
+ tJSONLiteral
+ tStringLiteral
+ tCurrent
+ tExpref
+ tAnd
+ tNot
+ tEOF
+)
+
+var basicTokens = map[rune]tokType{
+ '.': tDot,
+ '*': tStar,
+ ',': tComma,
+ ':': tColon,
+ '{': tLbrace,
+ '}': tRbrace,
+ ']': tRbracket, // '[' is handled separately; it may begin "[?" or "[]"
+ '(': tLparen,
+ ')': tRparen,
+ '@': tCurrent,
+}
+
+// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
+// When using this bitmask just be sure to shift the rune down 64 bits
+// before checking against identifierStartBits.
+const identifierStartBits uint64 = 576460745995190270
+
+// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
+var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
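+
+// A worked example of the bitmask checks used below: for r := 'a' (0x61),
+// identifierStartBits&(1<<(uint64(r)-64)) != 0 holds, so 'a' may start an
+// identifier; for r := '1' that test fails, but the trailing-set test
+// identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) != 0 accepts it.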
+
+var whiteSpace = map[rune]bool{
+ ' ': true, '\t': true, '\n': true, '\r': true,
+}
+
+func (t token) String() string {
+ return fmt.Sprintf("Token{%+v, %s, %d, %d}",
+ t.tokenType, t.value, t.position, t.length)
+}
+
+// NewLexer creates a new JMESPath lexer.
+func NewLexer() *Lexer {
+ lexer := Lexer{}
+ return &lexer
+}
+
+func (lexer *Lexer) next() rune {
+ if lexer.currentPos >= len(lexer.expression) {
+ lexer.lastWidth = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
+ lexer.lastWidth = w
+ lexer.currentPos += w
+ return r
+}
+
+func (lexer *Lexer) back() {
+ lexer.currentPos -= lexer.lastWidth
+}
+
+func (lexer *Lexer) peek() rune {
+ t := lexer.next()
+ lexer.back()
+ return t
+}
+
+// tokenize takes an expression and returns corresponding tokens.
+func (lexer *Lexer) tokenize(expression string) ([]token, error) {
+ var tokens []token
+ lexer.expression = expression
+ lexer.currentPos = 0
+ lexer.lastWidth = 0
+loop:
+ for {
+ r := lexer.next()
+ if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
+ t := lexer.consumeUnquotedIdentifier()
+ tokens = append(tokens, t)
+ } else if val, ok := basicTokens[r]; ok {
+ // Basic single char token.
+ t := token{
+ tokenType: val,
+ value: string(r),
+ position: lexer.currentPos - lexer.lastWidth,
+ length: 1,
+ }
+ tokens = append(tokens, t)
+ } else if r == '-' || (r >= '0' && r <= '9') {
+ t := lexer.consumeNumber()
+ tokens = append(tokens, t)
+ } else if r == '[' {
+ t := lexer.consumeLBracket()
+ tokens = append(tokens, t)
+ } else if r == '"' {
+ t, err := lexer.consumeQuotedIdentifier()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '\'' {
+ t, err := lexer.consumeRawStringLiteral()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '`' {
+ t, err := lexer.consumeLiteral()
+ if err != nil {
+ return tokens, err
+ }
+ tokens = append(tokens, t)
+ } else if r == '|' {
+ t := lexer.matchOrElse(r, '|', tOr, tPipe)
+ tokens = append(tokens, t)
+ } else if r == '<' {
+ t := lexer.matchOrElse(r, '=', tLTE, tLT)
+ tokens = append(tokens, t)
+ } else if r == '>' {
+ t := lexer.matchOrElse(r, '=', tGTE, tGT)
+ tokens = append(tokens, t)
+ } else if r == '!' {
+ t := lexer.matchOrElse(r, '=', tNE, tNot)
+ tokens = append(tokens, t)
+ } else if r == '=' {
+ t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
+ tokens = append(tokens, t)
+ } else if r == '&' {
+ t := lexer.matchOrElse(r, '&', tAnd, tExpref)
+ tokens = append(tokens, t)
+ } else if r == eof {
+ break loop
+ } else if _, ok := whiteSpace[r]; ok {
+ // Ignore whitespace
+ } else {
+ return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
+ }
+ }
+ tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
+ return tokens, nil
+}
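+
+// A brief sketch of tokenize in use:
+//
+//	tokens, err := NewLexer().tokenize("foo[0]")
+//	// err == nil; tokens: tUnquotedIdentifier("foo"), tLbracket,
+//	// tNumber("0"), tRbracket, tEOF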
+
+// Consume characters until the ending rune "r" is reached.
+// If the end of the expression is reached before seeing the
+// terminating rune "r", then an error is returned.
+// If no error occurs then the matching substring is returned.
+// The returned string will not include the ending rune.
+func (lexer *Lexer) consumeUntil(end rune) (string, error) {
+ start := lexer.currentPos
+ current := lexer.next()
+ for current != end && current != eof {
+ if current == '\\' && lexer.peek() != eof {
+ lexer.next()
+ }
+ current = lexer.next()
+ }
+ if lexer.lastWidth == 0 {
+ // Then we hit an EOF so we never reached the closing
+ // delimiter.
+ return "", SyntaxError{
+ msg: "Unclosed delimiter: " + string(end),
+ Expression: lexer.expression,
+ Offset: len(lexer.expression),
+ }
+ }
+ return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
+}
+
+func (lexer *Lexer) consumeLiteral() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('`')
+ if err != nil {
+ return token{}, err
+ }
+ value = strings.Replace(value, "\\`", "`", -1)
+ return token{
+ tokenType: tJSONLiteral,
+ value: value,
+ position: start,
+ length: len(value),
+ }, nil
+}
+
+func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
+ start := lexer.currentPos
+ currentIndex := start
+ current := lexer.next()
+ for current != '\'' && lexer.peek() != eof {
+ if current == '\\' && lexer.peek() == '\'' {
+ chunk := lexer.expression[currentIndex : lexer.currentPos-1]
+ lexer.buf.WriteString(chunk)
+ lexer.buf.WriteString("'")
+ lexer.next()
+ currentIndex = lexer.currentPos
+ }
+ current = lexer.next()
+ }
+ if lexer.lastWidth == 0 {
+ // Then we hit an EOF so we never reached the closing
+ // delimiter.
+ return token{}, SyntaxError{
+ msg: "Unclosed delimiter: '",
+ Expression: lexer.expression,
+ Offset: len(lexer.expression),
+ }
+ }
+ if currentIndex < lexer.currentPos {
+ lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
+ }
+ value := lexer.buf.String()
+ // Reset the buffer so it can be reused.
+ lexer.buf.Reset()
+ return token{
+ tokenType: tStringLiteral,
+ value: value,
+ position: start,
+ length: len(value),
+ }, nil
+}
+
+func (lexer *Lexer) syntaxError(msg string) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: lexer.expression,
+ Offset: lexer.currentPos - 1,
+ }
+}
+
+// Checks for a two char token, otherwise matches a single character
+// token. This is used whenever a two char token overlaps a single
+// char token, e.g. "||" -> tOr, "|" -> tPipe.
+func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == second {
+ t = token{
+ tokenType: matchedType,
+ value: string(first) + string(second),
+ position: start,
+ length: 2,
+ }
+ } else {
+ lexer.back()
+ t = token{
+ tokenType: singleCharType,
+ value: string(first),
+ position: start,
+ length: 1,
+ }
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeLBracket() token {
+ // There are three options here:
+ // 1. A filter expression "[?"
+ // 2. A flatten operator "[]"
+ // 3. A bare lbracket "["
+ start := lexer.currentPos - lexer.lastWidth
+ nextRune := lexer.next()
+ var t token
+ if nextRune == '?' {
+ t = token{
+ tokenType: tFilter,
+ value: "[?",
+ position: start,
+ length: 2,
+ }
+ } else if nextRune == ']' {
+ t = token{
+ tokenType: tFlatten,
+ value: "[]",
+ position: start,
+ length: 2,
+ }
+ } else {
+ t = token{
+ tokenType: tLbracket,
+ value: "[",
+ position: start,
+ length: 1,
+ }
+ lexer.back()
+ }
+ return t
+}
+
+func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
+ start := lexer.currentPos
+ value, err := lexer.consumeUntil('"')
+ if err != nil {
+ return token{}, err
+ }
+ var decoded string
+ asJSON := []byte("\"" + value + "\"")
+ if err := json.Unmarshal(asJSON, &decoded); err != nil {
+ return token{}, err
+ }
+ return token{
+ tokenType: tQuotedIdentifier,
+ value: decoded,
+ position: start - 1,
+ length: len(decoded),
+ }, nil
+}
+
+func (lexer *Lexer) consumeUnquotedIdentifier() token {
+ // Consume runes until we reach the end of an unquoted
+ // identifier.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tUnquotedIdentifier,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
+
+func (lexer *Lexer) consumeNumber() token {
+ // Consume runes until we reach something that's not a number.
+ start := lexer.currentPos - lexer.lastWidth
+ for {
+ r := lexer.next()
+ if r < '0' || r > '9' {
+ lexer.back()
+ break
+ }
+ }
+ value := lexer.expression[start:lexer.currentPos]
+ return token{
+ tokenType: tNumber,
+ value: value,
+ position: start,
+ length: lexer.currentPos - start,
+ }
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go
new file mode 100644
index 0000000..1240a17
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/parser.go
@@ -0,0 +1,603 @@
+package jmespath
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type astNodeType int
+
+//go:generate stringer -type astNodeType
+const (
+ ASTEmpty astNodeType = iota
+ ASTComparator
+ ASTCurrentNode
+ ASTExpRef
+ ASTFunctionExpression
+ ASTField
+ ASTFilterProjection
+ ASTFlatten
+ ASTIdentity
+ ASTIndex
+ ASTIndexExpression
+ ASTKeyValPair
+ ASTLiteral
+ ASTMultiSelectHash
+ ASTMultiSelectList
+ ASTOrExpression
+ ASTAndExpression
+ ASTNotExpression
+ ASTPipe
+ ASTProjection
+ ASTSubexpression
+ ASTSlice
+ ASTValueProjection
+)
+
+// ASTNode represents the abstract syntax tree of a JMESPath expression.
+type ASTNode struct {
+ nodeType astNodeType
+ value interface{}
+ children []ASTNode
+}
+
+func (node ASTNode) String() string {
+ return node.PrettyPrint(0)
+}
+
+// PrettyPrint will pretty print the parsed AST.
+// The AST is an implementation detail and this pretty print
+// function is provided as a convenience method to help with
+// debugging. You should not rely on its output as the internal
+// structure of the AST may change at any time.
+func (node ASTNode) PrettyPrint(indent int) string {
+ spaces := strings.Repeat(" ", indent)
+ output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType)
+ nextIndent := indent + 2
+ if node.value != nil {
+ if converted, ok := node.value.(fmt.Stringer); ok {
+ // Account for things like comparator nodes
+ // that are enums with a String() method.
+ output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String())
+ } else {
+ output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value)
+ }
+ }
+ if len(node.children) > 0 {
+ output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent))
+ childIndent := nextIndent + 2
+ for _, elem := range node.children {
+ output += elem.PrettyPrint(childIndent)
+ }
+ output += fmt.Sprintf("%s}\n", strings.Repeat(" ", nextIndent))
+ }
+ output += fmt.Sprintf("%s}\n", spaces)
+ return output
+}
+
+var bindingPowers = map[tokType]int{
+ tEOF: 0,
+ tUnquotedIdentifier: 0,
+ tQuotedIdentifier: 0,
+ tRbracket: 0,
+ tRparen: 0,
+ tComma: 0,
+ tRbrace: 0,
+ tNumber: 0,
+ tCurrent: 0,
+ tExpref: 0,
+ tColon: 0,
+ tPipe: 1,
+ tOr: 2,
+ tAnd: 3,
+ tEQ: 5,
+ tLT: 5,
+ tLTE: 5,
+ tGT: 5,
+ tGTE: 5,
+ tNE: 5,
+ tFlatten: 9,
+ tStar: 20,
+ tFilter: 21,
+ tDot: 40,
+ tNot: 45,
+ tLbrace: 50,
+ tLbracket: 55,
+ tLparen: 60,
+}
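+
+// As an illustration of these precedences: in "a || b | c", tOr (2) binds
+// tighter than tPipe (1), so the expression parses as (a || b) | c.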
+
+// Parser holds state about the current expression being parsed.
+type Parser struct {
+ expression string
+ tokens []token
+ index int
+}
+
+// NewParser creates a new JMESPath parser.
+func NewParser() *Parser {
+ p := Parser{}
+ return &p
+}
+
+// Parse will compile a JMESPath expression.
+func (p *Parser) Parse(expression string) (ASTNode, error) {
+ lexer := NewLexer()
+ p.expression = expression
+ p.index = 0
+ tokens, err := lexer.tokenize(expression)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ p.tokens = tokens
+ parsed, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() != tEOF {
+ return ASTNode{}, p.syntaxError(fmt.Sprintf(
+ "Unexpected token at the end of the expresssion: %s", p.current()))
+ }
+ return parsed, nil
+}
+
+func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
+ var err error
+ leftToken := p.lookaheadToken(0)
+ p.advance()
+ leftNode, err := p.nud(leftToken)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ currentToken := p.current()
+ for bindingPower < bindingPowers[currentToken] {
+ p.advance()
+ leftNode, err = p.led(currentToken, leftNode)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ currentToken = p.current()
+ }
+ return leftNode, nil
+}
+
+func (p *Parser) parseIndexExpression() (ASTNode, error) {
+ if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
+ return p.parseSliceExpression()
+ }
+ indexStr := p.lookaheadToken(0).value
+ parsedInt, err := strconv.Atoi(indexStr)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
+ p.advance()
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return indexNode, nil
+}
+
+func (p *Parser) parseSliceExpression() (ASTNode, error) {
+ parts := []*int{nil, nil, nil}
+ index := 0
+ current := p.current()
+ for current != tRbracket && index < 3 {
+ if current == tColon {
+ index++
+ p.advance()
+ } else if current == tNumber {
+ parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ parts[index] = &parsedInt
+ p.advance()
+ } else {
+ return ASTNode{}, p.syntaxError(
+ "Expected tColon or tNumber" + ", received: " + p.current().String())
+ }
+ current = p.current()
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTSlice,
+ value: parts,
+ }, nil
+}
+
+func (p *Parser) match(tokenType tokType) error {
+ if p.current() == tokenType {
+ p.advance()
+ return nil
+ }
+ return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
+}
+
+func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
+ switch tokenType {
+ case tDot:
+ if p.current() != tStar {
+ right, err := p.parseDotRHS(bindingPowers[tDot])
+ return ASTNode{
+ nodeType: ASTSubexpression,
+ children: []ASTNode{node, right},
+ }, err
+ }
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tDot])
+ return ASTNode{
+ nodeType: ASTValueProjection,
+ children: []ASTNode{node, right},
+ }, err
+ case tPipe:
+ right, err := p.parseExpression(bindingPowers[tPipe])
+ return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
+ case tOr:
+ right, err := p.parseExpression(bindingPowers[tOr])
+ return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
+ case tAnd:
+ right, err := p.parseExpression(bindingPowers[tAnd])
+ return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
+ case tLparen:
+ name := node.value
+ var args []ASTNode
+ for p.current() != tRparen {
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() == tComma {
+ if err := p.match(tComma); err != nil {
+ return ASTNode{}, err
+ }
+ }
+ args = append(args, expression)
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTFunctionExpression,
+ value: name,
+ children: args,
+ }, nil
+ case tFilter:
+ return p.parseFilter(node)
+ case tFlatten:
+ left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{left, right},
+ }, err
+ case tEQ, tNE, tGT, tGTE, tLT, tLTE:
+ right, err := p.parseExpression(bindingPowers[tokenType])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTComparator,
+ value: tokenType,
+ children: []ASTNode{node, right},
+ }, nil
+ case tLbracket:
+ tokenType := p.current()
+ var right ASTNode
+ var err error
+ if tokenType == tNumber || tokenType == tColon {
+ right, err = p.parseIndexExpression()
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.projectIfSlice(node, right)
+ }
+ // Otherwise this is a projection.
+ if err := p.match(tStar); err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{node, right},
+ }, nil
+ }
+ return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
+}
+
+func (p *Parser) nud(token token) (ASTNode, error) {
+ switch token.tokenType {
+ case tJSONLiteral:
+ var parsed interface{}
+ err := json.Unmarshal([]byte(token.value), &parsed)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
+ case tStringLiteral:
+ return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
+ case tUnquotedIdentifier:
+ return ASTNode{
+ nodeType: ASTField,
+ value: token.value,
+ }, nil
+ case tQuotedIdentifier:
+ node := ASTNode{nodeType: ASTField, value: token.value}
+ if p.current() == tLparen {
+ return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
+ }
+ return node, nil
+ case tStar:
+ left := ASTNode{nodeType: ASTIdentity}
+ var right ASTNode
+ var err error
+ if p.current() == tRbracket {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tStar])
+ }
+ return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
+ case tFilter:
+ return p.parseFilter(ASTNode{nodeType: ASTIdentity})
+ case tLbrace:
+ return p.parseMultiSelectHash()
+ case tFlatten:
+ left := ASTNode{
+ nodeType: ASTFlatten,
+ children: []ASTNode{{nodeType: ASTIdentity}},
+ }
+ right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
+ case tLbracket:
+ tokenType := p.current()
+ if tokenType == tNumber || tokenType == tColon {
+ right, err := p.parseIndexExpression()
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
+ } else if tokenType == tStar && p.lookahead(1) == tRbracket {
+ p.advance()
+ p.advance()
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{{nodeType: ASTIdentity}, right},
+ }, nil
+ } else {
+ return p.parseMultiSelectList()
+ }
+ case tCurrent:
+ return ASTNode{nodeType: ASTCurrentNode}, nil
+ case tExpref:
+ expression, err := p.parseExpression(bindingPowers[tExpref])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
+ case tNot:
+ expression, err := p.parseExpression(bindingPowers[tNot])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
+ case tLparen:
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRparen); err != nil {
+ return ASTNode{}, err
+ }
+ return expression, nil
+ case tEOF:
+ return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
+ }
+
+ return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
+}
+
+func (p *Parser) parseMultiSelectList() (ASTNode, error) {
+ var expressions []ASTNode
+ for {
+ expression, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ expressions = append(expressions, expression)
+ if p.current() == tRbracket {
+ break
+ }
+ err = p.match(tComma)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ }
+ err := p.match(tRbracket)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectList,
+ children: expressions,
+ }, nil
+}
+
+func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
+ var children []ASTNode
+ for {
+ keyToken := p.lookaheadToken(0)
+ if err := p.match(tUnquotedIdentifier); err != nil {
+ if err := p.match(tQuotedIdentifier); err != nil {
+ return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
+ }
+ }
+ keyName := keyToken.value
+ err := p.match(tColon)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ value, err := p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ node := ASTNode{
+ nodeType: ASTKeyValPair,
+ value: keyName,
+ children: []ASTNode{value},
+ }
+ children = append(children, node)
+ if p.current() == tComma {
+ err := p.match(tComma)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ } else if p.current() == tRbrace {
+ err := p.match(tRbrace)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ break
+ }
+ }
+ return ASTNode{
+ nodeType: ASTMultiSelectHash,
+ children: children,
+ }, nil
+}
+
+func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
+ indexExpr := ASTNode{
+ nodeType: ASTIndexExpression,
+ children: []ASTNode{left, right},
+ }
+ if right.nodeType == ASTSlice {
+ right, err := p.parseProjectionRHS(bindingPowers[tStar])
+ return ASTNode{
+ nodeType: ASTProjection,
+ children: []ASTNode{indexExpr, right},
+ }, err
+ }
+ return indexExpr, nil
+}
+
+func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
+ var right, condition ASTNode
+ var err error
+ condition, err = p.parseExpression(0)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ if err := p.match(tRbracket); err != nil {
+ return ASTNode{}, err
+ }
+ if p.current() == tFlatten {
+ right = ASTNode{nodeType: ASTIdentity}
+ } else {
+ right, err = p.parseProjectionRHS(bindingPowers[tFilter])
+ if err != nil {
+ return ASTNode{}, err
+ }
+ }
+
+ return ASTNode{
+ nodeType: ASTFilterProjection,
+ children: []ASTNode{node, right, condition},
+ }, nil
+}
+
+func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
+ lookahead := p.current()
+ if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
+ return p.parseExpression(bindingPower)
+ } else if lookahead == tLbracket {
+ if err := p.match(tLbracket); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectList()
+ } else if lookahead == tLbrace {
+ if err := p.match(tLbrace); err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseMultiSelectHash()
+ }
+ return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
+}
+
+func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
+ current := p.current()
+ if bindingPowers[current] < 10 {
+ return ASTNode{nodeType: ASTIdentity}, nil
+ } else if current == tLbracket {
+ return p.parseExpression(bindingPower)
+ } else if current == tFilter {
+ return p.parseExpression(bindingPower)
+ } else if current == tDot {
+ err := p.match(tDot)
+ if err != nil {
+ return ASTNode{}, err
+ }
+ return p.parseDotRHS(bindingPower)
+ } else {
+ return ASTNode{}, p.syntaxError("Error")
+ }
+}
+
+func (p *Parser) lookahead(number int) tokType {
+ return p.lookaheadToken(number).tokenType
+}
+
+func (p *Parser) current() tokType {
+ return p.lookahead(0)
+}
+
+func (p *Parser) lookaheadToken(number int) token {
+ return p.tokens[p.index+number]
+}
+
+func (p *Parser) advance() {
+ p.index++
+}
+
+func tokensOneOf(elements []tokType, token tokType) bool {
+ for _, elem := range elements {
+ if elem == token {
+ return true
+ }
+ }
+ return false
+}
+
+func (p *Parser) syntaxError(msg string) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: p.expression,
+ Offset: p.lookaheadToken(0).position,
+ }
+}
+
+// Create a SyntaxError based on the provided token.
+// This differs from syntaxError() which creates a SyntaxError
+// based on the current lookahead token.
+func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
+ return SyntaxError{
+ msg: msg,
+ Expression: p.expression,
+ Offset: t.position,
+ }
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
new file mode 100644
index 0000000..dae79cb
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=tokType; DO NOT EDIT
+
+package jmespath
+
+import "fmt"
+
+const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
+
+var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
+
+func (i tokType) String() string {
+ if i < 0 || i >= tokType(len(_tokType_index)-1) {
+ return fmt.Sprintf("tokType(%d)", i)
+ }
+ return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
+}
diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go
new file mode 100644
index 0000000..ddc1b7d
--- /dev/null
+++ b/vendor/github.com/jmespath/go-jmespath/util.go
@@ -0,0 +1,185 @@
+package jmespath
+
+import (
+ "errors"
+ "reflect"
+)
+
+// isFalse determines if an object is false based on the JMESPath spec.
+// JMESPath defines false values to be any of:
+// - An empty string, array, or hash.
+// - The boolean value false.
+// - nil
+func isFalse(value interface{}) bool {
+ switch v := value.(type) {
+ case bool:
+ return !v
+ case []interface{}:
+ return len(v) == 0
+ case map[string]interface{}:
+ return len(v) == 0
+ case string:
+ return len(v) == 0
+ case nil:
+ return true
+ }
+ // Try the reflection cases before returning false.
+ rv := reflect.ValueOf(value)
+ switch rv.Kind() {
+ case reflect.Struct:
+ // A struct type will never be false, even if
+ // all of its values are the zero type.
+ return false
+ case reflect.Slice, reflect.Map:
+ return rv.Len() == 0
+ case reflect.Ptr:
+ if rv.IsNil() {
+ return true
+ }
+ // If it's a pointer type, we'll try to deref the pointer
+ // and evaluate the pointer value for isFalse.
+ element := rv.Elem()
+ return isFalse(element.Interface())
+ }
+ return false
+}
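+
+// For example: isFalse(""), isFalse([]interface{}{}), and isFalse(nil) are
+// all true, while isFalse(0.0) is false; JMESPath does not treat the number
+// zero as a false value.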
+
+// objsEqual is a generic object equality check.
+// It will take two arbitrary objects and recursively determine
+// if they are equal.
+func objsEqual(left interface{}, right interface{}) bool {
+ return reflect.DeepEqual(left, right)
+}
+
+// sliceParam refers to a single part of a slice.
+// A slice consists of a start, a stop, and a step, similar to
+// python slices.
+type sliceParam struct {
+ N int
+ Specified bool
+}
+
+// slice supports the [start:stop:step] style slicing used in JMESPath.
+func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
+ computed, err := computeSliceParams(len(slice), parts)
+ if err != nil {
+ return nil, err
+ }
+ start, stop, step := computed[0], computed[1], computed[2]
+ result := []interface{}{}
+ if step > 0 {
+ for i := start; i < stop; i += step {
+ result = append(result, slice[i])
+ }
+ } else {
+ for i := start; i > stop; i += step {
+ result = append(result, slice[i])
+ }
+ }
+ return result, nil
+}
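+
+// A worked example: slicing []interface{}{0, 1, 2, 3, 4} with start=1,
+// stop=4, step=2 (all Specified) yields [1 3], mirroring Python's
+// list[1:4:2].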
+
+func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
+ var start, stop, step int
+ if !parts[2].Specified {
+ step = 1
+ } else if parts[2].N == 0 {
+ return nil, errors.New("Invalid slice, step cannot be 0")
+ } else {
+ step = parts[2].N
+ }
+ stepValueNegative := step < 0
+
+ if !parts[0].Specified {
+ if stepValueNegative {
+ start = length - 1
+ } else {
+ start = 0
+ }
+ } else {
+ start = capSlice(length, parts[0].N, step)
+ }
+
+ if !parts[1].Specified {
+ if stepValueNegative {
+ stop = -1
+ } else {
+ stop = length
+ }
+ } else {
+ stop = capSlice(length, parts[1].N, step)
+ }
+ return []int{start, stop, step}, nil
+}
+
+func capSlice(length int, actual int, step int) int {
+ if actual < 0 {
+ actual += length
+ if actual < 0 {
+ if step < 0 {
+ actual = -1
+ } else {
+ actual = 0
+ }
+ }
+ } else if actual >= length {
+ if step < 0 {
+ actual = length - 1
+ } else {
+ actual = length
+ }
+ }
+ return actual
+}
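+
+// For example, with length 5 and a positive step: capSlice(5, -2, 1) == 3
+// (negative indexes count from the end) and capSlice(5, 10, 1) == 5
+// (indexes past the end are clamped to the length).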
+
+// toArrayNum converts an empty interface type to a slice of float64.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false.
+func toArrayNum(data interface{}) ([]float64, bool) {
+ // Is there a better way to do this with reflect?
+ if d, ok := data.([]interface{}); ok {
+ result := make([]float64, len(d))
+ for i, el := range d {
+ item, ok := el.(float64)
+ if !ok {
+ return nil, false
+ }
+ result[i] = item
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+// toArrayStr converts an empty interface type to a slice of strings.
+// If any element in the array cannot be converted, then nil is returned
+// along with a second value of false. If the input data could be entirely
+// converted, then the converted data, along with a second value of true,
+// will be returned.
+func toArrayStr(data interface{}) ([]string, bool) {
+ // Is there a better way to do this with reflect?
+ if d, ok := data.([]interface{}); ok {
+ result := make([]string, len(d))
+ for i, el := range d {
+ item, ok := el.(string)
+ if !ok {
+ return nil, false
+ }
+ result[i] = item
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+func isSliceType(v interface{}) bool {
+ if v == nil {
+ return false
+ }
+ return reflect.TypeOf(v).Kind() == reflect.Slice
+}
diff --git a/vendor/go4.org/LICENSE b/vendor/go4.org/LICENSE
new file mode 100644
index 0000000..8f71f43
--- /dev/null
+++ b/vendor/go4.org/LICENSE
@@ -0,0 +1,202 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "{}"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright {yyyy} {name of copyright owner}
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
diff --git a/vendor/go4.org/wkfs/gcs/gcs.go b/vendor/go4.org/wkfs/gcs/gcs.go
new file mode 100644
index 0000000..21f2551
--- /dev/null
+++ b/vendor/go4.org/wkfs/gcs/gcs.go
@@ -0,0 +1,196 @@
+/*
+Copyright 2014 The Camlistore Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package gcs registers a Google Cloud Storage filesystem at the
+// well-known /gcs/ filesystem path if the current machine is running
+// on Google Compute Engine.
+//
+// It was initially only meant for small files, and as such, it can only
+// read files smaller than 1MB for now.
+package gcs // import "go4.org/wkfs/gcs"
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "go4.org/wkfs"
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ "google.golang.org/cloud"
+ "google.golang.org/cloud/compute/metadata"
+ "google.golang.org/cloud/storage"
+)
+
+// maxSize is the maximum size for all files read, because we use a
+// bytes.Reader as our file reader instead of storage.NewReader: embedding a
+// bytes.Reader gives us all wkfs.File methods for free. This filesystem was
+// only intended for configuration data, so this is OK for now.
+const maxSize = 1 << 20
+
+func init() {
+ if !metadata.OnGCE() {
+ return
+ }
+ hc, err := google.DefaultClient(oauth2.NoContext)
+ if err != nil {
+ registerBrokenFS(fmt.Errorf("could not get http client for context: %v", err))
+ return
+ }
+ projID, err := metadata.ProjectID()
+ if projID == "" || err != nil {
+ registerBrokenFS(fmt.Errorf("could not get GCE project ID: %v", err))
+ return
+ }
+ ctx := cloud.NewContext(projID, hc)
+ sc, err := storage.NewClient(ctx)
+ if err != nil {
+ registerBrokenFS(fmt.Errorf("could not get cloud storage client: %v", err))
+ return
+ }
+ wkfs.RegisterFS("/gcs/", &gcsFS{
+ ctx: ctx,
+ sc: sc,
+ })
+}
+
+type gcsFS struct {
+ ctx context.Context
+ sc *storage.Client
+ err error // sticky error
+}
+
+func registerBrokenFS(err error) {
+ wkfs.RegisterFS("/gcs/", &gcsFS{
+ err: err,
+ })
+}
+
+func (fs *gcsFS) parseName(name string) (bucket, fileName string, err error) {
+ if fs.err != nil {
+ return "", "", fs.err
+ }
+ name = strings.TrimPrefix(name, "/gcs/")
+ i := strings.Index(name, "/")
+ if i < 0 {
+ return name, "", nil
+ }
+ return name[:i], name[i+1:], nil
+}
+
+// Open opens the named file for reading. It returns an error if the file size
+// is larger than 1 << 20.
+func (fs *gcsFS) Open(name string) (wkfs.File, error) {
+ bucket, fileName, err := fs.parseName(name)
+ if err != nil {
+ return nil, err
+ }
+ obj := fs.sc.Bucket(bucket).Object(fileName)
+ attrs, err := obj.Attrs(fs.ctx)
+ if err != nil {
+ return nil, err
+ }
+ size := attrs.Size
+ if size > maxSize {
+ return nil, fmt.Errorf("file %s too large (%d bytes) for /gcs/ filesystem", name, size)
+ }
+ rc, err := obj.NewReader(fs.ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer rc.Close()
+
+ slurp, err := ioutil.ReadAll(io.LimitReader(rc, size))
+ if err != nil {
+ return nil, err
+ }
+ return &file{
+ name: name,
+ Reader: bytes.NewReader(slurp),
+ }, nil
+}
+
+func (fs *gcsFS) Stat(name string) (os.FileInfo, error) { return fs.Lstat(name) }
+func (fs *gcsFS) Lstat(name string) (os.FileInfo, error) {
+ bucket, fileName, err := fs.parseName(name)
+ if err != nil {
+ return nil, err
+ }
+ attrs, err := fs.sc.Bucket(bucket).Object(fileName).Attrs(fs.ctx)
+ if err == storage.ErrObjectNotExist {
+ return nil, os.ErrNotExist
+ }
+ if err != nil {
+ return nil, err
+ }
+ return &statInfo{
+ name: attrs.Name,
+ size: attrs.Size,
+ }, nil
+}
+
+func (fs *gcsFS) MkdirAll(path string, perm os.FileMode) error { return nil }
+
+func (fs *gcsFS) OpenFile(name string, flag int, perm os.FileMode) (wkfs.FileWriter, error) {
+ bucket, fileName, err := fs.parseName(name)
+ if err != nil {
+ return nil, err
+ }
+ switch flag {
+ case os.O_WRONLY | os.O_CREATE | os.O_EXCL:
+ case os.O_WRONLY | os.O_CREATE | os.O_TRUNC:
+ default:
+ return nil, fmt.Errorf("Unsupported OpenFlag flag mode %d on Google Cloud Storage", flag)
+ }
+ if flag&os.O_EXCL != 0 {
+ if _, err := fs.Stat(name); err == nil {
+ return nil, os.ErrExist
+ }
+ }
+ // TODO(mpl): consider adding perm to the object's ObjectAttrs.Metadata
+ return fs.sc.Bucket(bucket).Object(fileName).NewWriter(fs.ctx), nil
+}
+
+type statInfo struct {
+ name string
+ size int64
+ isDir bool
+ modtime time.Time
+}
+
+func (si *statInfo) IsDir() bool { return si.isDir }
+func (si *statInfo) ModTime() time.Time { return si.modtime }
+func (si *statInfo) Mode() os.FileMode { return 0644 }
+func (si *statInfo) Name() string { return path.Base(si.name) }
+func (si *statInfo) Size() int64 { return si.size }
+func (si *statInfo) Sys() interface{} { return nil }
+
+type file struct {
+ name string
+ *bytes.Reader
+}
+
+func (*file) Close() error { return nil }
+func (f *file) Name() string { return path.Base(f.name) }
+func (f *file) Stat() (os.FileInfo, error) {
+ panic("Stat not implemented on /gcs/ files yet")
+}
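As a usage sketch: importing this package for its side effect registers the /gcs/ filesystem on GCE, after which objects are readable through the ordinary wkfs entry points. The bucket and object names below are hypothetical; any readable object under 1MB would behave the same way.

    package main

    import (
        "fmt"
        "log"

        "go4.org/wkfs"
        _ "go4.org/wkfs/gcs" // side effect: registers /gcs/ when running on GCE
    )

    func main() {
        // "my-bucket" and "config.json" are placeholders for illustration.
        data, err := wkfs.ReadFile("/gcs/my-bucket/config.json")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("read %d bytes\n", len(data))
    }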
diff --git a/vendor/go4.org/wkfs/wkfs.go b/vendor/go4.org/wkfs/wkfs.go
new file mode 100644
index 0000000..f4df062
--- /dev/null
+++ b/vendor/go4.org/wkfs/wkfs.go
@@ -0,0 +1,132 @@
+/*
+Copyright 2014 The Camlistore Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package wkfs implements the pluggable "well-known filesystem" abstraction layer.
+//
+// Instead of accessing files directly through the operating system
+// using os.Open or os.Stat, code should use wkfs.Open or wkfs.Stat,
+// which first try to intercept paths at well-known top-level
+// directories representing previously-registered mount types,
+// otherwise fall through to the operating system paths.
+//
+// Example of top-level well-known directories that might be
+// registered include /gcs/bucket/object for Google Cloud Storage or
+// /s3/bucket/object for AWS S3.
+package wkfs // import "go4.org/wkfs"
+
+import (
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+)
+
+type File interface {
+ io.Reader
+ io.ReaderAt
+ io.Closer
+ io.Seeker
+ Name() string
+ Stat() (os.FileInfo, error)
+}
+
+type FileWriter interface {
+ io.Writer
+ io.Closer
+}
+
+func Open(name string) (File, error) { return fs(name).Open(name) }
+func Stat(name string) (os.FileInfo, error) { return fs(name).Stat(name) }
+func Lstat(name string) (os.FileInfo, error) { return fs(name).Lstat(name) }
+func MkdirAll(path string, perm os.FileMode) error { return fs(path).MkdirAll(path, perm) }
+func OpenFile(name string, flag int, perm os.FileMode) (FileWriter, error) {
+ return fs(name).OpenFile(name, flag, perm)
+}
+func Create(name string) (FileWriter, error) {
+ // like os.Create but WRONLY instead of RDWR because we don't
+ // expose a Reader here.
+ return OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
+}
+
+func fs(name string) FileSystem {
+ for pfx, fs := range wkFS {
+ if strings.HasPrefix(name, pfx) {
+ return fs
+ }
+ }
+ return osFS{}
+}
+
+type osFS struct{}
+
+func (osFS) Open(name string) (File, error) { return os.Open(name) }
+func (osFS) Stat(name string) (os.FileInfo, error) { return os.Stat(name) }
+func (osFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) }
+func (osFS) MkdirAll(path string, perm os.FileMode) error { return os.MkdirAll(path, perm) }
+func (osFS) OpenFile(name string, flag int, perm os.FileMode) (FileWriter, error) {
+ return os.OpenFile(name, flag, perm)
+}
+
+type FileSystem interface {
+ Open(name string) (File, error)
+ OpenFile(name string, flag int, perm os.FileMode) (FileWriter, error)
+ Stat(name string) (os.FileInfo, error)
+ Lstat(name string) (os.FileInfo, error)
+ MkdirAll(path string, perm os.FileMode) error
+}
+
+// well-known filesystems
+var wkFS = map[string]FileSystem{}
+
+// RegisterFS registers a well-known filesystem. It intercepts
+// anything beginning with prefix (which must start and end with a
+// forward slash) and forwards it to fs.
+func RegisterFS(prefix string, fs FileSystem) {
+ if !strings.HasPrefix(prefix, "/") || !strings.HasSuffix(prefix, "/") {
+ panic("bogus prefix: " + prefix)
+ }
+ if _, dup := wkFS[prefix]; dup {
+ panic("duplication registration of " + prefix)
+ }
+ wkFS[prefix] = fs
+}
+
+// WriteFile writes data to a file named by filename.
+// If the file does not exist, WriteFile creates it with permissions perm;
+// otherwise WriteFile truncates it before writing.
+func WriteFile(filename string, data []byte, perm os.FileMode) error {
+ f, err := OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
+ if err != nil {
+ return err
+ }
+ n, err := f.Write(data)
+ if err == nil && n < len(data) {
+ err = io.ErrShortWrite
+ }
+ if err1 := f.Close(); err == nil {
+ err = err1
+ }
+ return err
+}
+
+func ReadFile(filename string) ([]byte, error) {
+ f, err := Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ return ioutil.ReadAll(f)
+}
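A minimal sketch of the fallthrough behavior described in the package comment: a path matching no registered prefix is served by the operating system via osFS, so the wkfs helpers can stand in for the os/ioutil equivalents. The /tmp path below is just an example location.

    package main

    import (
        "fmt"
        "log"

        "go4.org/wkfs"
    )

    func main() {
        // No registered prefix matches /tmp/, so both calls fall
        // through to the osFS implementation.
        if err := wkfs.WriteFile("/tmp/wkfs-demo.txt", []byte("hello\n"), 0644); err != nil {
            log.Fatal(err)
        }
        data, err := wkfs.ReadFile("/tmp/wkfs-demo.txt")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Print(string(data))
    }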
diff --git a/vendor/golang.org/x/net/http2/Dockerfile b/vendor/golang.org/x/net/http2/Dockerfile
new file mode 100644
index 0000000..53fc525
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Dockerfile
@@ -0,0 +1,51 @@
+#
+# This Dockerfile builds a recent curl with HTTP/2 client support, using
+# a recent nghttp2 build.
+#
+# See the Makefile for how to tag it. If Docker and that image is found, the
+# Go tests use this curl binary for integration tests.
+#
+
+FROM ubuntu:trusty
+
+RUN apt-get update && \
+ apt-get upgrade -y && \
+ apt-get install -y git-core build-essential wget
+
+RUN apt-get install -y --no-install-recommends \
+ autotools-dev libtool pkg-config zlib1g-dev \
+ libcunit1-dev libssl-dev libxml2-dev libevent-dev \
+ automake autoconf
+
+# The list of packages nghttp2 recommends for h2load:
+RUN apt-get install -y --no-install-recommends make binutils \
+ autoconf automake autotools-dev \
+ libtool pkg-config zlib1g-dev libcunit1-dev libssl-dev libxml2-dev \
+ libev-dev libevent-dev libjansson-dev libjemalloc-dev \
+ cython python3.4-dev python-setuptools
+
+# Note: setting NGHTTP2_VER before the git clone, so an old git clone isn't cached:
+ENV NGHTTP2_VER 895da9a
+RUN cd /root && git clone https://github.com/tatsuhiro-t/nghttp2.git
+
+WORKDIR /root/nghttp2
+RUN git reset --hard $NGHTTP2_VER
+RUN autoreconf -i
+RUN automake
+RUN autoconf
+RUN ./configure
+RUN make
+RUN make install
+
+WORKDIR /root
+RUN wget http://curl.haxx.se/download/curl-7.45.0.tar.gz
+RUN tar -zxvf curl-7.45.0.tar.gz
+WORKDIR /root/curl-7.45.0
+RUN ./configure --with-ssl --with-nghttp2=/usr/local
+RUN make
+RUN make install
+RUN ldconfig
+
+CMD ["-h"]
+ENTRYPOINT ["/usr/local/bin/curl"]
+
diff --git a/vendor/golang.org/x/net/http2/Makefile b/vendor/golang.org/x/net/http2/Makefile
new file mode 100644
index 0000000..55fd826
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/Makefile
@@ -0,0 +1,3 @@
+curlimage:
+ docker build -t gohttp2/curl .
+
diff --git a/vendor/golang.org/x/net/http2/README b/vendor/golang.org/x/net/http2/README
new file mode 100644
index 0000000..360d5aa
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/README
@@ -0,0 +1,20 @@
+This is a work-in-progress HTTP/2 implementation for Go.
+
+It will eventually live in the Go standard library and won't require
+any changes to your code to use. It will just be automatic.
+
+Status:
+
+* The server support is pretty good. A few things are missing
+ but are being worked on.
+* The client work has just started but, since it shares a lot of code
+  with the server, is coming along much quicker.
+
+Docs are at https://godoc.org/golang.org/x/net/http2
+
+Demo test server at https://http2.golang.org/
+
+Help & bug reports welcome!
+
+Contributing: https://golang.org/doc/contribute.html
+Bugs: https://golang.org/issue/new?title=x/net/http2:+
diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go
new file mode 100644
index 0000000..8cb3eaa
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/client_conn_pool.go
@@ -0,0 +1,246 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code's client connection pooling.
+
+package http2
+
+import (
+ "crypto/tls"
+ "net/http"
+ "sync"
+)
+
+// ClientConnPool manages a pool of HTTP/2 client connections.
+type ClientConnPool interface {
+ GetClientConn(req *http.Request, addr string) (*ClientConn, error)
+ MarkDead(*ClientConn)
+}
+
+// clientConnPoolIdleCloser is the interface implemented by ClientConnPool
+// implementations which can close their idle connections.
+type clientConnPoolIdleCloser interface {
+ ClientConnPool
+ closeIdleConnections()
+}
+
+var (
+ _ clientConnPoolIdleCloser = (*clientConnPool)(nil)
+ _ clientConnPoolIdleCloser = noDialClientConnPool{}
+)
+
+// TODO: use singleflight for dialing and addConnCalls?
+type clientConnPool struct {
+ t *Transport
+
+ mu sync.Mutex // TODO: maybe switch to RWMutex
+ // TODO: add support for sharing conns based on cert names
+ // (e.g. share conn for googleapis.com and appspot.com)
+ conns map[string][]*ClientConn // key is host:port
+ dialing map[string]*dialCall // currently in-flight dials
+ keys map[*ClientConn][]string
+ addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
+}
+
+func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+ return p.getClientConn(req, addr, dialOnMiss)
+}
+
+const (
+ dialOnMiss = true
+ noDialOnMiss = false
+)
+
+func (p *clientConnPool) getClientConn(_ *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
+ p.mu.Lock()
+ for _, cc := range p.conns[addr] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return cc, nil
+ }
+ }
+ if !dialOnMiss {
+ p.mu.Unlock()
+ return nil, ErrNoCachedConn
+ }
+ call := p.getStartDialLocked(addr)
+ p.mu.Unlock()
+ <-call.done
+ return call.res, call.err
+}
+
+// dialCall is an in-flight Transport dial call to a host.
+type dialCall struct {
+ p *clientConnPool
+ done chan struct{} // closed when done
+ res *ClientConn // valid after done is closed
+ err error // valid after done is closed
+}
+
+// requires p.mu is held.
+func (p *clientConnPool) getStartDialLocked(addr string) *dialCall {
+ if call, ok := p.dialing[addr]; ok {
+ // A dial is already in-flight. Don't start another.
+ return call
+ }
+ call := &dialCall{p: p, done: make(chan struct{})}
+ if p.dialing == nil {
+ p.dialing = make(map[string]*dialCall)
+ }
+ p.dialing[addr] = call
+ go call.dial(addr)
+ return call
+}
+
+// run in its own goroutine.
+func (c *dialCall) dial(addr string) {
+ c.res, c.err = c.p.t.dialClientConn(addr)
+ close(c.done)
+
+ c.p.mu.Lock()
+ delete(c.p.dialing, addr)
+ if c.err == nil {
+ c.p.addConnLocked(addr, c.res)
+ }
+ c.p.mu.Unlock()
+}
+
+// addConnIfNeeded makes a NewClientConn out of c if a connection for key doesn't
+// already exist. It coalesces concurrent calls with the same key.
+// This is used by the http1 Transport code when it creates a new connection. Because
+// the http1 Transport doesn't de-dup TCP dials to outbound hosts (because it doesn't know
+// the protocol), it can get into a situation where it has multiple TLS connections.
+// This code decides which ones live or die.
+// The return value reports whether c was used.
+// c is never closed.
+func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn) (used bool, err error) {
+ p.mu.Lock()
+ for _, cc := range p.conns[key] {
+ if cc.CanTakeNewRequest() {
+ p.mu.Unlock()
+ return false, nil
+ }
+ }
+ call, dup := p.addConnCalls[key]
+ if !dup {
+ if p.addConnCalls == nil {
+ p.addConnCalls = make(map[string]*addConnCall)
+ }
+ call = &addConnCall{
+ p: p,
+ done: make(chan struct{}),
+ }
+ p.addConnCalls[key] = call
+ go call.run(t, key, c)
+ }
+ p.mu.Unlock()
+
+ <-call.done
+ if call.err != nil {
+ return false, call.err
+ }
+ return !dup, nil
+}
+
+type addConnCall struct {
+ p *clientConnPool
+ done chan struct{} // closed when done
+ err error
+}
+
+func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
+ cc, err := t.NewClientConn(tc)
+
+ p := c.p
+ p.mu.Lock()
+ if err != nil {
+ c.err = err
+ } else {
+ p.addConnLocked(key, cc)
+ }
+ delete(p.addConnCalls, key)
+ p.mu.Unlock()
+ close(c.done)
+}
+
+func (p *clientConnPool) addConn(key string, cc *ClientConn) {
+ p.mu.Lock()
+ p.addConnLocked(key, cc)
+ p.mu.Unlock()
+}
+
+// p.mu must be held
+func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
+ for _, v := range p.conns[key] {
+ if v == cc {
+ return
+ }
+ }
+ if p.conns == nil {
+ p.conns = make(map[string][]*ClientConn)
+ }
+ if p.keys == nil {
+ p.keys = make(map[*ClientConn][]string)
+ }
+ p.conns[key] = append(p.conns[key], cc)
+ p.keys[cc] = append(p.keys[cc], key)
+}
+
+func (p *clientConnPool) MarkDead(cc *ClientConn) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ for _, key := range p.keys[cc] {
+ vv, ok := p.conns[key]
+ if !ok {
+ continue
+ }
+ newList := filterOutClientConn(vv, cc)
+ if len(newList) > 0 {
+ p.conns[key] = newList
+ } else {
+ delete(p.conns, key)
+ }
+ }
+ delete(p.keys, cc)
+}
+
+func (p *clientConnPool) closeIdleConnections() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ // TODO: don't close a cc if it was just added to the pool
+ // milliseconds ago and has never been used. There's currently
+ // a small race window with the HTTP/1 Transport's integration
+ // where it can add an idle conn just before using it, and
+ // somebody else can concurrently call CloseIdleConns and
+ // break some caller's RoundTrip.
+ for _, vv := range p.conns {
+ for _, cc := range vv {
+ cc.closeIfIdle()
+ }
+ }
+}
+
+func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn {
+ out := in[:0]
+ for _, v := range in {
+ if v != exclude {
+ out = append(out, v)
+ }
+ }
+ // If we filtered it out, zero out the last item so the GC
+ // doesn't keep the excluded connection alive through the
+ // original backing array.
+ if len(in) != len(out) {
+ in[len(in)-1] = nil
+ }
+ return out
+}
+
+// noDialClientConnPool is an implementation of http2.ClientConnPool
+// which never dials. We let the HTTP/1.1 client dial and use its TLS
+// connection instead.
+type noDialClientConnPool struct{ *clientConnPool }
+
+func (p noDialClientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
+ return p.getClientConn(req, addr, noDialOnMiss)
+}
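The dialing map above is a hand-rolled form of call coalescing (the TODO mentions singleflight). A standalone sketch of the same pattern, with hypothetical names, for readers who want it in isolation: the first caller for a key does the work, later callers block on the same done channel.

    package main

    import (
        "fmt"
        "sync"
    )

    // coalescer deduplicates concurrent calls for the same key.
    type coalescer struct {
        mu    sync.Mutex
        calls map[string]*call
    }

    type call struct {
        done chan struct{}
        res  string // valid after done is closed
    }

    func (c *coalescer) do(key string, fn func() string) string {
        c.mu.Lock()
        if cl, ok := c.calls[key]; ok {
            c.mu.Unlock()
            <-cl.done // a call is already in flight; wait for its result
            return cl.res
        }
        cl := &call{done: make(chan struct{})}
        if c.calls == nil {
            c.calls = make(map[string]*call)
        }
        c.calls[key] = cl
        c.mu.Unlock()

        cl.res = fn() // do the work outside the lock
        close(cl.done)

        c.mu.Lock()
        delete(c.calls, key)
        c.mu.Unlock()
        return cl.res
    }

    func main() {
        var c coalescer
        var wg sync.WaitGroup
        for i := 0; i < 3; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                fmt.Println(c.do("example.com:443", func() string { return "conn" }))
            }()
        }
        wg.Wait()
    }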
diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go
new file mode 100644
index 0000000..d87ba0f
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/configure_transport.go
@@ -0,0 +1,80 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.6
+
+package http2
+
+import (
+ "crypto/tls"
+ "fmt"
+ "net/http"
+)
+
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+ connPool := new(clientConnPool)
+ t2 := &Transport{
+ ConnPool: noDialClientConnPool{connPool},
+ t1: t1,
+ }
+ connPool.t = t2
+ if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil {
+ return nil, err
+ }
+ if t1.TLSClientConfig == nil {
+ t1.TLSClientConfig = new(tls.Config)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") {
+ t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...)
+ }
+ if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
+ t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
+ }
+ upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
+ addr := authorityAddr(authority)
+ if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
+ go c.Close()
+ return erringRoundTripper{err}
+ } else if !used {
+ // Turns out we don't need this c.
+ // For example, two goroutines made requests to the same host
+ // at the same time, both kicking off TCP dials (since the
+ // protocol was unknown).
+ go c.Close()
+ }
+ return t2
+ }
+ if m := t1.TLSNextProto; len(m) == 0 {
+ t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
+ "h2": upgradeFn,
+ }
+ } else {
+ m["h2"] = upgradeFn
+ }
+ return t2, nil
+}
+
+// registerHTTPSProtocol calls Transport.RegisterProtocol,
+// converting panics into errors.
+func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
+ defer func() {
+ if e := recover(); e != nil {
+ err = fmt.Errorf("%v", e)
+ }
+ }()
+ t.RegisterProtocol("https", rt)
+ return nil
+}
+
+// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
+// if there is already a cached connection to the host.
+type noDialH2RoundTripper struct{ t *Transport }
+
+func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ res, err := rt.t.RoundTrip(req)
+ if err == ErrNoCachedConn {
+ return nil, http.ErrSkipAltProtocol
+ }
+ return res, err
+}
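configureTransport backs the package's exported http2.ConfigureTransport helper on Go 1.6+. A typical usage sketch, upgrading a stock http.Transport in place (the demo URL is the one mentioned in the README; any HTTP/2-capable HTTPS server works):

    package main

    import (
        "fmt"
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        t := &http.Transport{} // HTTP/1.1 transport to be upgraded
        // ConfigureTransport registers the "h2" ALPN protocol on t, so
        // HTTPS requests negotiate HTTP/2 when the server supports it.
        if err := http2.ConfigureTransport(t); err != nil {
            log.Fatal(err)
        }
        client := &http.Client{Transport: t}
        resp, err := client.Get("https://http2.golang.org/")
        if err != nil {
            log.Fatal(err)
        }
        defer resp.Body.Close()
        fmt.Println(resp.Proto) // "HTTP/2.0" if negotiation succeeded
    }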
diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go
new file mode 100644
index 0000000..71a4e29
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/errors.go
@@ -0,0 +1,122 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "fmt"
+)
+
+// An ErrCode is an unsigned 32-bit error code as defined in the HTTP/2 spec.
+type ErrCode uint32
+
+const (
+ ErrCodeNo ErrCode = 0x0
+ ErrCodeProtocol ErrCode = 0x1
+ ErrCodeInternal ErrCode = 0x2
+ ErrCodeFlowControl ErrCode = 0x3
+ ErrCodeSettingsTimeout ErrCode = 0x4
+ ErrCodeStreamClosed ErrCode = 0x5
+ ErrCodeFrameSize ErrCode = 0x6
+ ErrCodeRefusedStream ErrCode = 0x7
+ ErrCodeCancel ErrCode = 0x8
+ ErrCodeCompression ErrCode = 0x9
+ ErrCodeConnect ErrCode = 0xa
+ ErrCodeEnhanceYourCalm ErrCode = 0xb
+ ErrCodeInadequateSecurity ErrCode = 0xc
+ ErrCodeHTTP11Required ErrCode = 0xd
+)
+
+var errCodeName = map[ErrCode]string{
+ ErrCodeNo: "NO_ERROR",
+ ErrCodeProtocol: "PROTOCOL_ERROR",
+ ErrCodeInternal: "INTERNAL_ERROR",
+ ErrCodeFlowControl: "FLOW_CONTROL_ERROR",
+ ErrCodeSettingsTimeout: "SETTINGS_TIMEOUT",
+ ErrCodeStreamClosed: "STREAM_CLOSED",
+ ErrCodeFrameSize: "FRAME_SIZE_ERROR",
+ ErrCodeRefusedStream: "REFUSED_STREAM",
+ ErrCodeCancel: "CANCEL",
+ ErrCodeCompression: "COMPRESSION_ERROR",
+ ErrCodeConnect: "CONNECT_ERROR",
+ ErrCodeEnhanceYourCalm: "ENHANCE_YOUR_CALM",
+ ErrCodeInadequateSecurity: "INADEQUATE_SECURITY",
+ ErrCodeHTTP11Required: "HTTP_1_1_REQUIRED",
+}
+
+func (e ErrCode) String() string {
+ if s, ok := errCodeName[e]; ok {
+ return s
+ }
+ return fmt.Sprintf("unknown error code 0x%x", uint32(e))
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection.
+type ConnectionError ErrCode
+
+func (e ConnectionError) Error() string { return fmt.Sprintf("connection error: %s", ErrCode(e)) }
+
+// StreamError is an error that only affects one stream within an
+// HTTP/2 connection.
+type StreamError struct {
+ StreamID uint32
+ Code ErrCode
+}
+
+func (e StreamError) Error() string {
+ return fmt.Sprintf("stream error: stream ID %d; %v", e.StreamID, e.Code)
+}
+
+// 6.9.1 The Flow Control Window
+// "If a sender receives a WINDOW_UPDATE that causes a flow control
+// window to exceed this maximum it MUST terminate either the stream
+// or the connection, as appropriate. For streams, [...]; for the
+// connection, a GOAWAY frame with a FLOW_CONTROL_ERROR code."
+type goAwayFlowError struct{}
+
+func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" }
+
+// connError pairs an ErrCode with an informative reason explaining
+// why the connection error occurs.
+//
+// Errors of this type are only returned by the frame parser functions
+// and converted into ConnectionError(ErrCodeProtocol).
+type connError struct {
+ Code ErrCode
+ Reason string
+}
+
+func (e connError) Error() string {
+ return fmt.Sprintf("http2: connection error: %v: %v", e.Code, e.Reason)
+}
+
+type pseudoHeaderError string
+
+func (e pseudoHeaderError) Error() string {
+ return fmt.Sprintf("invalid pseudo-header %q", string(e))
+}
+
+type duplicatePseudoHeaderError string
+
+func (e duplicatePseudoHeaderError) Error() string {
+ return fmt.Sprintf("duplicate pseudo-header %q", string(e))
+}
+
+type headerFieldNameError string
+
+func (e headerFieldNameError) Error() string {
+ return fmt.Sprintf("invalid header field name %q", string(e))
+}
+
+type headerFieldValueError string
+
+func (e headerFieldValueError) Error() string {
+ return fmt.Sprintf("invalid header field value %q", string(e))
+}
+
+var (
+ errMixPseudoHeaderTypes = errors.New("mix of request and response pseudo headers")
+ errPseudoAfterRegular = errors.New("pseudo header field after regular")
+)
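Because connection-level and stream-level failures are distinct exported types, callers can sort them with a type switch. A small sketch (the classify helper is hypothetical):

    package main

    import (
        "fmt"

        "golang.org/x/net/http2"
    )

    // classify reports whether err ends the whole connection or only one stream.
    func classify(err error) string {
        switch e := err.(type) {
        case http2.ConnectionError:
            return fmt.Sprintf("connection-level: %v", http2.ErrCode(e))
        case http2.StreamError:
            return fmt.Sprintf("stream %d only: %v", e.StreamID, e.Code)
        default:
            return fmt.Sprintf("other: %v", err)
        }
    }

    func main() {
        fmt.Println(classify(http2.ConnectionError(http2.ErrCodeProtocol)))
        fmt.Println(classify(http2.StreamError{StreamID: 3, Code: http2.ErrCodeRefusedStream}))
    }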
diff --git a/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/golang.org/x/net/http2/fixed_buffer.go
new file mode 100644
index 0000000..47da0f0
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/fixed_buffer.go
@@ -0,0 +1,60 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+)
+
+// fixedBuffer is an io.ReadWriter backed by a fixed size buffer.
+// It never allocates, but moves old data as new data is written.
+type fixedBuffer struct {
+ buf []byte
+ r, w int
+}
+
+var (
+ errReadEmpty = errors.New("read from empty fixedBuffer")
+ errWriteFull = errors.New("write on full fixedBuffer")
+)
+
+// Read copies bytes from the buffer into p.
+// It is an error to read when no data is available.
+func (b *fixedBuffer) Read(p []byte) (n int, err error) {
+ if b.r == b.w {
+ return 0, errReadEmpty
+ }
+ n = copy(p, b.buf[b.r:b.w])
+ b.r += n
+ if b.r == b.w {
+ b.r = 0
+ b.w = 0
+ }
+ return n, nil
+}
+
+// Len returns the number of bytes of the unread portion of the buffer.
+func (b *fixedBuffer) Len() int {
+ return b.w - b.r
+}
+
+// Write copies bytes from p into the buffer.
+// It is an error to write more data than the buffer can hold.
+func (b *fixedBuffer) Write(p []byte) (n int, err error) {
+ // Slide existing data to beginning.
+ if b.r > 0 && len(p) > len(b.buf)-b.w {
+ copy(b.buf, b.buf[b.r:b.w])
+ b.w -= b.r
+ b.r = 0
+ }
+
+ // Write new data.
+ n = copy(b.buf[b.w:], p)
+ b.w += n
+ if n < len(p) {
+ err = errWriteFull
+ }
+ return n, err
+}
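fixedBuffer is unexported, so the following is a sketch of an in-package test (a hypothetical fixed_buffer_sketch_test.go alongside this file) exercising the full and empty error paths:

    package http2

    import "testing"

    func TestFixedBufferSketch(t *testing.T) {
        b := &fixedBuffer{buf: make([]byte, 4)}

        // Only 4 of 6 bytes fit; the short write reports errWriteFull.
        if n, _ := b.Write([]byte("abcdef")); n != 4 {
            t.Fatalf("wrote %d bytes into a 4-byte buffer; want 4", n)
        }
        if _, err := b.Write([]byte("x")); err != errWriteFull {
            t.Fatalf("got %v; want errWriteFull", err)
        }

        p := make([]byte, 4)
        if n, _ := b.Read(p); n != 4 {
            t.Fatalf("read %d bytes; want 4", n)
        }
        // Empty again; further reads report errReadEmpty.
        if _, err := b.Read(p); err != errReadEmpty {
            t.Fatalf("got %v; want errReadEmpty", err)
        }
    }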
diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go
new file mode 100644
index 0000000..957de25
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/flow.go
@@ -0,0 +1,50 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Flow control
+
+package http2
+
+// flow is the flow control window's size.
+type flow struct {
+ // n is the number of DATA bytes we're allowed to send.
+ // A flow is kept both on a conn and a per-stream.
+ n int32
+
+ // conn points to the shared connection-level flow that is
+ // shared by all streams on that conn. It is nil for the flow
+ // that's on the conn directly.
+ conn *flow
+}
+
+func (f *flow) setConnFlow(cf *flow) { f.conn = cf }
+
+func (f *flow) available() int32 {
+ n := f.n
+ if f.conn != nil && f.conn.n < n {
+ n = f.conn.n
+ }
+ return n
+}
+
+func (f *flow) take(n int32) {
+ if n > f.available() {
+ panic("internal error: took too much")
+ }
+ f.n -= n
+ if f.conn != nil {
+ f.conn.n -= n
+ }
+}
+
+// add adds n bytes (positive or negative) to the flow control window.
+// It returns false if the sum would exceed 2^31-1.
+func (f *flow) add(n int32) bool {
+ remain := (1<<31 - 1) - f.n
+ if n > remain {
+ return false
+ }
+ f.n += n
+ return true
+}
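Another in-package sketch (a hypothetical example in a test file next to flow.go) showing how a stream's window is capped by the shared connection window, and how take consumes from both:

    package http2

    import "fmt"

    func ExampleFlowSketch() {
        var connFlow, streamFlow flow
        connFlow.add(100)
        streamFlow.add(1000)
        streamFlow.setConnFlow(&connFlow)

        // available is the min of the stream and connection windows.
        fmt.Println(streamFlow.available())

        streamFlow.take(60) // consumes from both windows
        fmt.Println(streamFlow.available(), connFlow.available())
        // Output:
        // 100
        // 40 40
    }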
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
new file mode 100644
index 0000000..88067dc
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -0,0 +1,1507 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/lex/httplex"
+)
+
+const frameHeaderLen = 9
+
+var padZeros = make([]byte, 255) // zeros for padding
+
+// A FrameType is a registered frame type as defined in
+// http://http2.github.io/http2-spec/#rfc.section.11.2
+type FrameType uint8
+
+const (
+ FrameData FrameType = 0x0
+ FrameHeaders FrameType = 0x1
+ FramePriority FrameType = 0x2
+ FrameRSTStream FrameType = 0x3
+ FrameSettings FrameType = 0x4
+ FramePushPromise FrameType = 0x5
+ FramePing FrameType = 0x6
+ FrameGoAway FrameType = 0x7
+ FrameWindowUpdate FrameType = 0x8
+ FrameContinuation FrameType = 0x9
+)
+
+var frameName = map[FrameType]string{
+ FrameData: "DATA",
+ FrameHeaders: "HEADERS",
+ FramePriority: "PRIORITY",
+ FrameRSTStream: "RST_STREAM",
+ FrameSettings: "SETTINGS",
+ FramePushPromise: "PUSH_PROMISE",
+ FramePing: "PING",
+ FrameGoAway: "GOAWAY",
+ FrameWindowUpdate: "WINDOW_UPDATE",
+ FrameContinuation: "CONTINUATION",
+}
+
+func (t FrameType) String() string {
+ if s, ok := frameName[t]; ok {
+ return s
+ }
+ return fmt.Sprintf("UNKNOWN_FRAME_TYPE_%d", uint8(t))
+}
+
+// Flags is a bitmask of HTTP/2 flags.
+// The meaning of flags varies depending on the frame type.
+type Flags uint8
+
+// Has reports whether f contains all (0 or more) flags in v.
+func (f Flags) Has(v Flags) bool {
+ return (f & v) == v
+}
+
+// Frame-specific FrameHeader flag bits.
+const (
+ // Data Frame
+ FlagDataEndStream Flags = 0x1
+ FlagDataPadded Flags = 0x8
+
+ // Headers Frame
+ FlagHeadersEndStream Flags = 0x1
+ FlagHeadersEndHeaders Flags = 0x4
+ FlagHeadersPadded Flags = 0x8
+ FlagHeadersPriority Flags = 0x20
+
+ // Settings Frame
+ FlagSettingsAck Flags = 0x1
+
+ // Ping Frame
+ FlagPingAck Flags = 0x1
+
+ // Continuation Frame
+ FlagContinuationEndHeaders Flags = 0x4
+
+ FlagPushPromiseEndHeaders Flags = 0x4
+ FlagPushPromisePadded Flags = 0x8
+)
+
+var flagName = map[FrameType]map[Flags]string{
+ FrameData: {
+ FlagDataEndStream: "END_STREAM",
+ FlagDataPadded: "PADDED",
+ },
+ FrameHeaders: {
+ FlagHeadersEndStream: "END_STREAM",
+ FlagHeadersEndHeaders: "END_HEADERS",
+ FlagHeadersPadded: "PADDED",
+ FlagHeadersPriority: "PRIORITY",
+ },
+ FrameSettings: {
+ FlagSettingsAck: "ACK",
+ },
+ FramePing: {
+ FlagPingAck: "ACK",
+ },
+ FrameContinuation: {
+ FlagContinuationEndHeaders: "END_HEADERS",
+ },
+ FramePushPromise: {
+ FlagPushPromiseEndHeaders: "END_HEADERS",
+ FlagPushPromisePadded: "PADDED",
+ },
+}
+
+// a frameParser parses a frame given its FrameHeader and payload
+// bytes. The length of payload will always equal fh.Length (which
+// might be 0).
+type frameParser func(fh FrameHeader, payload []byte) (Frame, error)
+
+var frameParsers = map[FrameType]frameParser{
+ FrameData: parseDataFrame,
+ FrameHeaders: parseHeadersFrame,
+ FramePriority: parsePriorityFrame,
+ FrameRSTStream: parseRSTStreamFrame,
+ FrameSettings: parseSettingsFrame,
+ FramePushPromise: parsePushPromise,
+ FramePing: parsePingFrame,
+ FrameGoAway: parseGoAwayFrame,
+ FrameWindowUpdate: parseWindowUpdateFrame,
+ FrameContinuation: parseContinuationFrame,
+}
+
+func typeFrameParser(t FrameType) frameParser {
+ if f := frameParsers[t]; f != nil {
+ return f
+ }
+ return parseUnknownFrame
+}
+
+// A FrameHeader is the 9 byte header of all HTTP/2 frames.
+//
+// See http://http2.github.io/http2-spec/#FrameHeader
+type FrameHeader struct {
+ valid bool // caller can access []byte fields in the Frame
+
+ // Type is the 1 byte frame type. There are ten standard frame
+ // types, but extension frame types may be written by WriteRawFrame
+ // and will be returned by ReadFrame (as UnknownFrame).
+ Type FrameType
+
+ // Flags are the 1 byte of 8 potential bit flags per frame.
+ // They are specific to the frame type.
+ Flags Flags
+
+ // Length is the length of the frame, not including the 9 byte header.
+ // The maximum size is one byte less than 16MB (uint24), but only
+ // frames up to 16KB are allowed without peer agreement.
+ Length uint32
+
+ // StreamID is which stream this frame is for. Certain frames
+ // are not stream-specific, in which case this field is 0.
+ StreamID uint32
+}
+
+// Header returns h. It exists so FrameHeaders can be embedded in other
+// specific frame types and implement the Frame interface.
+func (h FrameHeader) Header() FrameHeader { return h }
+
+func (h FrameHeader) String() string {
+ var buf bytes.Buffer
+ buf.WriteString("[FrameHeader ")
+ h.writeDebug(&buf)
+ buf.WriteByte(']')
+ return buf.String()
+}
+
+func (h FrameHeader) writeDebug(buf *bytes.Buffer) {
+ buf.WriteString(h.Type.String())
+ if h.Flags != 0 {
+ buf.WriteString(" flags=")
+ set := 0
+ for i := uint8(0); i < 8; i++ {
+ if h.Flags&(1<<i) == 0 {
+ continue
+ }
+ set++
+ if set > 1 {
+ buf.WriteByte('|')
+ }
+ name := flagName[h.Type][Flags(1<<i)]
+ if name != "" {
+ buf.WriteString(name)
+ } else {
+ fmt.Fprintf(buf, "0x%x", 1<<i)
+ }
+ }
+ }
+ if h.StreamID != 0 {
+ fmt.Fprintf(buf, " stream=%d", h.StreamID)
+ }
+ fmt.Fprintf(buf, " len=%d", h.Length)
+}
+
+func (h *FrameHeader) checkValid() {
+ if !h.valid {
+ panic("Frame accessor called on non-owned Frame")
+ }
+}
+
+func (h *FrameHeader) invalidate() { h.valid = false }
+
+// frame header bytes.
+// Used only by ReadFrameHeader.
+var fhBytes = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, frameHeaderLen)
+ return &buf
+ },
+}
+
+// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
+// Most users should use Framer.ReadFrame instead.
+func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
+ bufp := fhBytes.Get().(*[]byte)
+ defer fhBytes.Put(bufp)
+ return readFrameHeader(*bufp, r)
+}
+
+func readFrameHeader(buf []byte, r io.Reader) (FrameHeader, error) {
+ _, err := io.ReadFull(r, buf[:frameHeaderLen])
+ if err != nil {
+ return FrameHeader{}, err
+ }
+ return FrameHeader{
+ Length: (uint32(buf[0])<<16 | uint32(buf[1])<<8 | uint32(buf[2])),
+ Type: FrameType(buf[3]),
+ Flags: Flags(buf[4]),
+ StreamID: binary.BigEndian.Uint32(buf[5:]) & (1<<31 - 1),
+ valid: true,
+ }, nil
+}
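A quick sketch of the wire format, decoding a hand-built 9-byte header with the exported ReadFrameHeader (the byte values are illustrative):

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "golang.org/x/net/http2"
    )

    func main() {
        // 9-byte header: 24-bit length (5), type DATA (0x0),
        // flags END_STREAM (0x1), 31-bit stream ID (1).
        raw := []byte{0x00, 0x00, 0x05, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01}
        fh, err := http2.ReadFrameHeader(bytes.NewReader(raw))
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(fh) // [FrameHeader DATA flags=END_STREAM stream=1 len=5]
    }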
+
+// A Frame is the base interface implemented by all frame types.
+// Callers will generally type-assert the specific frame type:
+// *HeadersFrame, *SettingsFrame, *WindowUpdateFrame, etc.
+//
+// Frames are only valid until the next call to Framer.ReadFrame.
+type Frame interface {
+ Header() FrameHeader
+
+ // invalidate is called by Framer.ReadFrame to mark this
+ // frame's buffers as invalid, since the subsequent frame
+ // will reuse them.
+ invalidate()
+}
+
+// A Framer reads and writes Frames.
+type Framer struct {
+ r io.Reader
+ lastFrame Frame
+ errDetail error
+
+ // lastHeaderStream is non-zero if the last frame was an
+ // unfinished HEADERS/CONTINUATION.
+ lastHeaderStream uint32
+
+ maxReadSize uint32
+ headerBuf [frameHeaderLen]byte
+
+ // TODO: let getReadBuf be configurable, and use a less memory-pinning
+ // allocator in server.go to minimize memory pinned for many idle conns.
+ // Will probably also need to make frame invalidation have a hook too.
+ getReadBuf func(size uint32) []byte
+ readBuf []byte // cache for default getReadBuf
+
+ maxWriteSize uint32 // zero means unlimited; TODO: implement
+
+ w io.Writer
+ wbuf []byte
+
+ // AllowIllegalWrites permits the Framer's Write methods to
+ // write frames that do not conform to the HTTP/2 spec. This
+ // permits using the Framer to test other HTTP/2
+ // implementations' conformance to the spec.
+ // If false, the Write methods will prefer to return an error
+ // rather than comply.
+ AllowIllegalWrites bool
+
+ // AllowIllegalReads permits the Framer's ReadFrame method
+ // to return non-compliant frames or frame orders.
+ // This is for testing and permits using the Framer to test
+ // other HTTP/2 implementations' conformance to the spec.
+ // It is not compatible with ReadMetaHeaders.
+ AllowIllegalReads bool
+
+ // ReadMetaHeaders if non-nil causes ReadFrame to merge
+ // HEADERS and CONTINUATION frames together and return
+ // MetaHeadersFrame instead.
+ ReadMetaHeaders *hpack.Decoder
+
+ // MaxHeaderListSize is the http2 MAX_HEADER_LIST_SIZE.
+ // It's used only if ReadMetaHeaders is set; 0 means a sane default
+ // (currently 16MB).
+ // If the limit is hit, MetaHeadersFrame.Truncated is set true.
+ MaxHeaderListSize uint32
+
+ // TODO: track which type of frame & with which flags was sent
+ // last. Then return an error (unless AllowIllegalWrites) if
+ // we're in the middle of a header block and a
+ // non-Continuation or Continuation on a different stream is
+ // attempted to be written.
+
+ logReads bool
+
+ debugFramer *Framer // only use for logging written writes
+ debugFramerBuf *bytes.Buffer
+}
+
+func (fr *Framer) maxHeaderListSize() uint32 {
+ if fr.MaxHeaderListSize == 0 {
+ return 16 << 20 // sane default, per docs
+ }
+ return fr.MaxHeaderListSize
+}
+
+func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
+ // Write the FrameHeader.
+ f.wbuf = append(f.wbuf[:0],
+ 0, // 3 bytes of length, filled in by endWrite
+ 0,
+ 0,
+ byte(ftype),
+ byte(flags),
+ byte(streamID>>24),
+ byte(streamID>>16),
+ byte(streamID>>8),
+ byte(streamID))
+}
+
+func (f *Framer) endWrite() error {
+ // Now that we know the final size, fill in the FrameHeader in
+ // the space previously reserved for it. Abuse append.
+ length := len(f.wbuf) - frameHeaderLen
+ if length >= (1 << 24) {
+ return ErrFrameTooLarge
+ }
+ _ = append(f.wbuf[:0],
+ byte(length>>16),
+ byte(length>>8),
+ byte(length))
+ if logFrameWrites {
+ f.logWrite()
+ }
+
+ n, err := f.w.Write(f.wbuf)
+ if err == nil && n != len(f.wbuf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
+
+func (f *Framer) logWrite() {
+ if f.debugFramer == nil {
+ f.debugFramerBuf = new(bytes.Buffer)
+ f.debugFramer = NewFramer(nil, f.debugFramerBuf)
+ f.debugFramer.logReads = false // we log it ourselves, saying "wrote" below
+ // Let us read anything, even if we accidentally wrote it
+ // in the wrong order:
+ f.debugFramer.AllowIllegalReads = true
+ }
+ f.debugFramerBuf.Write(f.wbuf)
+ fr, err := f.debugFramer.ReadFrame()
+ if err != nil {
+ log.Printf("http2: Framer %p: failed to decode just-written frame", f)
+ return
+ }
+ log.Printf("http2: Framer %p: wrote %v", f, summarizeFrame(fr))
+}
+
+func (f *Framer) writeByte(v byte) { f.wbuf = append(f.wbuf, v) }
+func (f *Framer) writeBytes(v []byte) { f.wbuf = append(f.wbuf, v...) }
+func (f *Framer) writeUint16(v uint16) { f.wbuf = append(f.wbuf, byte(v>>8), byte(v)) }
+func (f *Framer) writeUint32(v uint32) {
+ f.wbuf = append(f.wbuf, byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
+}
+
+const (
+ minMaxFrameSize = 1 << 14
+ maxFrameSize = 1<<24 - 1
+)
+
+// NewFramer returns a Framer that writes frames to w and reads them from r.
+func NewFramer(w io.Writer, r io.Reader) *Framer {
+ fr := &Framer{
+ w: w,
+ r: r,
+ logReads: logFrameReads,
+ }
+ fr.getReadBuf = func(size uint32) []byte {
+ if cap(fr.readBuf) >= int(size) {
+ return fr.readBuf[:size]
+ }
+ fr.readBuf = make([]byte, size)
+ return fr.readBuf
+ }
+ fr.SetMaxReadFrameSize(maxFrameSize)
+ return fr
+}
+
+// SetMaxReadFrameSize sets the maximum size of a frame
+// that will be read by a subsequent call to ReadFrame.
+// It is the caller's responsibility to advertise this
+// limit with a SETTINGS frame.
+func (fr *Framer) SetMaxReadFrameSize(v uint32) {
+ if v > maxFrameSize {
+ v = maxFrameSize
+ }
+ fr.maxReadSize = v
+}
+
+// ErrorDetail returns a more detailed error of the last error
+// returned by Framer.ReadFrame. For instance, if ReadFrame
+// returns a StreamError with code PROTOCOL_ERROR, ErrorDetail
+// will say exactly what was invalid. ErrorDetail is not guaranteed
+// to return a non-nil value and like the rest of the http2 package,
+// its return value is not protected by an API compatibility promise.
+// ErrorDetail is reset after the next call to ReadFrame.
+func (fr *Framer) ErrorDetail() error {
+ return fr.errDetail
+}
+
+// ErrFrameTooLarge is returned from Framer.ReadFrame when the peer
+// sends a frame that is larger than declared with SetMaxReadFrameSize.
+var ErrFrameTooLarge = errors.New("http2: frame too large")
+
+// terminalReadFrameError reports whether err is an unrecoverable
+// error from ReadFrame and no other frames should be read.
+func terminalReadFrameError(err error) bool {
+ if _, ok := err.(StreamError); ok {
+ return false
+ }
+ return err != nil
+}
+
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+func (fr *Framer) ReadFrame() (Frame, error) {
+ fr.errDetail = nil
+ if fr.lastFrame != nil {
+ fr.lastFrame.invalidate()
+ }
+ fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
+ if err != nil {
+ return nil, err
+ }
+ if fh.Length > fr.maxReadSize {
+ return nil, ErrFrameTooLarge
+ }
+ payload := fr.getReadBuf(fh.Length)
+ if _, err := io.ReadFull(fr.r, payload); err != nil {
+ return nil, err
+ }
+ f, err := typeFrameParser(fh.Type)(fh, payload)
+ if err != nil {
+ if ce, ok := err.(connError); ok {
+ return nil, fr.connError(ce.Code, ce.Reason)
+ }
+ return nil, err
+ }
+ if err := fr.checkFrameOrder(f); err != nil {
+ return nil, err
+ }
+ if fr.logReads {
+ log.Printf("http2: Framer %p: read %v", fr, summarizeFrame(f))
+ }
+ if fh.Type == FrameHeaders && fr.ReadMetaHeaders != nil {
+ return fr.readMetaFrame(f.(*HeadersFrame))
+ }
+ return f, nil
+}
+
+// connError returns ConnectionError(code), but first
+// stashes away a public reason so the caller can optionally relay it
+// to the peer before hanging up on them. This might help others debug
+// their implementations.
+func (fr *Framer) connError(code ErrCode, reason string) error {
+ fr.errDetail = errors.New(reason)
+ return ConnectionError(code)
+}
+
+// checkFrameOrder reports an error if f is an invalid frame to return
+// next from ReadFrame. Mostly it checks whether HEADERS and
+// CONTINUATION frames are contiguous.
+func (fr *Framer) checkFrameOrder(f Frame) error {
+ last := fr.lastFrame
+ fr.lastFrame = f
+ if fr.AllowIllegalReads {
+ return nil
+ }
+
+ fh := f.Header()
+ if fr.lastHeaderStream != 0 {
+ if fh.Type != FrameContinuation {
+ return fr.connError(ErrCodeProtocol,
+ fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
+ fh.Type, fh.StreamID,
+ last.Header().Type, fr.lastHeaderStream))
+ }
+ if fh.StreamID != fr.lastHeaderStream {
+ return fr.connError(ErrCodeProtocol,
+ fmt.Sprintf("got CONTINUATION for stream %d; expected stream %d",
+ fh.StreamID, fr.lastHeaderStream))
+ }
+ } else if fh.Type == FrameContinuation {
+ return fr.connError(ErrCodeProtocol, fmt.Sprintf("unexpected CONTINUATION for stream %d", fh.StreamID))
+ }
+
+ switch fh.Type {
+ case FrameHeaders, FrameContinuation:
+ if fh.Flags.Has(FlagHeadersEndHeaders) {
+ fr.lastHeaderStream = 0
+ } else {
+ fr.lastHeaderStream = fh.StreamID
+ }
+ }
+
+ return nil
+}
+
+// A DataFrame conveys arbitrary, variable-length sequences of octets
+// associated with a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.1
+type DataFrame struct {
+ FrameHeader
+ data []byte
+}
+
+func (f *DataFrame) StreamEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagDataEndStream)
+}
+
+// Data returns the frame's data octets, not including any padding
+// size byte or padding suffix bytes.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *DataFrame) Data() []byte {
+ f.checkValid()
+ return f.data
+}
+
+func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ // DATA frames MUST be associated with a stream. If a
+ // DATA frame is received whose stream identifier
+ // field is 0x0, the recipient MUST respond with a
+ // connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
+ }
+ f := &DataFrame{
+ FrameHeader: fh,
+ }
+ var padSize byte
+ if fh.Flags.Has(FlagDataPadded) {
+ var err error
+ payload, padSize, err = readByte(payload)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if int(padSize) > len(payload) {
+ // If the length of the padding is greater than the
+ // length of the frame payload, the recipient MUST
+ // treat this as a connection error.
+ // Filed: https://github.com/http2/http2-spec/issues/610
+ return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
+ }
+ f.data = payload[:len(payload)-int(padSize)]
+ return f, nil
+}
+
+var (
+ errStreamID = errors.New("invalid stream ID")
+ errDepStreamID = errors.New("invalid dependent stream ID")
+)
+
+func validStreamIDOrZero(streamID uint32) bool {
+ return streamID&(1<<31) == 0
+}
+
+func validStreamID(streamID uint32) bool {
+ return streamID != 0 && streamID&(1<<31) == 0
+}
+
+// WriteData writes a DATA frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error {
+ // TODO: ignoring padding for now. will add when somebody cares.
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if endStream {
+ flags |= FlagDataEndStream
+ }
+ f.startWrite(FrameData, flags, streamID)
+ f.wbuf = append(f.wbuf, data...)
+ return f.endWrite()
+}
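Putting the Framer together end to end, a small round-trip sketch writing a DATA frame into a buffer and reading it back through the same Framer:

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "golang.org/x/net/http2"
    )

    func main() {
        var buf bytes.Buffer
        fr := http2.NewFramer(&buf, &buf) // write to and read from the same buffer

        if err := fr.WriteData(1, true, []byte("hello")); err != nil {
            log.Fatal(err)
        }
        f, err := fr.ReadFrame()
        if err != nil {
            log.Fatal(err)
        }
        df := f.(*http2.DataFrame)
        fmt.Printf("stream=%d end=%v data=%q\n",
            df.Header().StreamID, df.StreamEnded(), df.Data())
    }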
+
+// A SettingsFrame conveys configuration parameters that affect how
+// endpoints communicate, such as preferences and constraints on peer
+// behavior.
+//
+// See http://http2.github.io/http2-spec/#SETTINGS
+type SettingsFrame struct {
+ FrameHeader
+ p []byte
+}
+
+func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
+ // When this (ACK 0x1) bit is set, the payload of the
+ // SETTINGS frame MUST be empty. Receipt of a
+ // SETTINGS frame with the ACK flag set and a length
+ // field value other than 0 MUST be treated as a
+ // connection error (Section 5.4.1) of type
+ // FRAME_SIZE_ERROR.
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ // SETTINGS frames always apply to a connection,
+ // never a single stream. The stream identifier for a
+ // SETTINGS frame MUST be zero (0x0). If an endpoint
+ // receives a SETTINGS frame whose stream identifier
+ // field is anything other than 0x0, the endpoint MUST
+ // respond with a connection error (Section 5.4.1) of
+ // type PROTOCOL_ERROR.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ if len(p)%6 != 0 {
+ // Expecting a whole number of 6-byte settings.
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ f := &SettingsFrame{FrameHeader: fh, p: p}
+ if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
+ // Values above the maximum flow control window size of 2^31 - 1 MUST
+ // be treated as a connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR.
+ return nil, ConnectionError(ErrCodeFlowControl)
+ }
+ return f, nil
+}
+
+func (f *SettingsFrame) IsAck() bool {
+ return f.FrameHeader.Flags.Has(FlagSettingsAck)
+}
+
+func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
+ f.checkValid()
+ buf := f.p
+ for len(buf) > 0 {
+ settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
+ if settingID == s {
+ return binary.BigEndian.Uint32(buf[2:6]), true
+ }
+ buf = buf[6:]
+ }
+ return 0, false
+}
+
+// ForeachSetting runs fn for each setting.
+// It stops and returns the first error.
+func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
+ f.checkValid()
+ buf := f.p
+ for len(buf) > 0 {
+ if err := fn(Setting{
+ SettingID(binary.BigEndian.Uint16(buf[:2])),
+ binary.BigEndian.Uint32(buf[2:6]),
+ }); err != nil {
+ return err
+ }
+ buf = buf[6:]
+ }
+ return nil
+}
+
+// WriteSettings writes a SETTINGS frame with zero or more settings
+// specified and the ACK bit not set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettings(settings ...Setting) error {
+ f.startWrite(FrameSettings, 0, 0)
+ for _, s := range settings {
+ f.writeUint16(uint16(s.ID))
+ f.writeUint32(s.Val)
+ }
+ return f.endWrite()
+}
+
+// WriteSettingsAck writes an empty SETTINGS frame with the ACK bit set.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteSettingsAck() error {
+ f.startWrite(FrameSettings, FlagSettingsAck, 0)
+ return f.endWrite()
+}
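A companion sketch for the SETTINGS path: writing two settings into a buffer and walking them back out with ForeachSetting (the chosen settings and values are arbitrary examples):

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "golang.org/x/net/http2"
    )

    func main() {
        var buf bytes.Buffer
        fr := http2.NewFramer(&buf, &buf)

        err := fr.WriteSettings(
            http2.Setting{ID: http2.SettingMaxFrameSize, Val: 1 << 14},
            http2.Setting{ID: http2.SettingMaxConcurrentStreams, Val: 250},
        )
        if err != nil {
            log.Fatal(err)
        }
        f, err := fr.ReadFrame()
        if err != nil {
            log.Fatal(err)
        }
        sf := f.(*http2.SettingsFrame)
        sf.ForeachSetting(func(s http2.Setting) error {
            fmt.Println(s.ID, "=", s.Val)
            return nil
        })
    }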
+
+// A PingFrame is a mechanism for measuring a minimal round trip time
+// from the sender, as well as determining whether an idle connection
+// is still functional.
+// See http://http2.github.io/http2-spec/#rfc.section.6.7
+type PingFrame struct {
+ FrameHeader
+ Data [8]byte
+}
+
+func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
+
+func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if len(payload) != 8 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID != 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ f := &PingFrame{FrameHeader: fh}
+ copy(f.Data[:], payload)
+ return f, nil
+}
+
+func (f *Framer) WritePing(ack bool, data [8]byte) error {
+ var flags Flags
+ if ack {
+ flags = FlagPingAck
+ }
+ f.startWrite(FramePing, flags, 0)
+ f.writeBytes(data[:])
+ return f.endWrite()
+}
+
+// A GoAwayFrame informs the remote peer to stop creating streams on this connection.
+// See http://http2.github.io/http2-spec/#rfc.section.6.8
+type GoAwayFrame struct {
+ FrameHeader
+ LastStreamID uint32
+ ErrCode ErrCode
+ debugData []byte
+}
+
+// DebugData returns any debug data in the GOAWAY frame. Its contents
+// are not defined.
+// The caller must not retain the returned memory past the next
+// call to ReadFrame.
+func (f *GoAwayFrame) DebugData() []byte {
+ f.checkValid()
+ return f.debugData
+}
+
+func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.StreamID != 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ if len(p) < 8 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ return &GoAwayFrame{
+ FrameHeader: fh,
+ LastStreamID: binary.BigEndian.Uint32(p[:4]) & (1<<31 - 1),
+ ErrCode: ErrCode(binary.BigEndian.Uint32(p[4:8])),
+ debugData: p[8:],
+ }, nil
+}
+
+func (f *Framer) WriteGoAway(maxStreamID uint32, code ErrCode, debugData []byte) error {
+ f.startWrite(FrameGoAway, 0, 0)
+ f.writeUint32(maxStreamID & (1<<31 - 1))
+ f.writeUint32(uint32(code))
+ f.writeBytes(debugData)
+ return f.endWrite()
+}
+
+// An UnknownFrame is the frame type returned when the frame type is unknown
+// or no specific frame type parser exists.
+type UnknownFrame struct {
+ FrameHeader
+ p []byte
+}
+
+// Payload returns the frame's payload (after the header). It is not
+// valid to call this method after a subsequent call to
+// Framer.ReadFrame, nor is it valid to retain the returned slice.
+// The memory is owned by the Framer and is invalidated when the next
+// frame is read.
+func (f *UnknownFrame) Payload() []byte {
+ f.checkValid()
+ return f.p
+}
+
+func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) {
+ return &UnknownFrame{fh, p}, nil
+}
+
+// A WindowUpdateFrame is used to implement flow control.
+// See http://http2.github.io/http2-spec/#rfc.section.6.9
+type WindowUpdateFrame struct {
+ FrameHeader
+ Increment uint32 // never read with high bit set
+}
+
+func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if len(p) != 4 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
+ if inc == 0 {
+ // A receiver MUST treat the receipt of a
+ // WINDOW_UPDATE frame with a flow control window
+ // increment of 0 as a stream error (Section 5.4.2) of
+ // type PROTOCOL_ERROR; errors on the connection flow
+ // control window MUST be treated as a connection
+ // error (Section 5.4.1).
+ if fh.StreamID == 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+ }
+ return &WindowUpdateFrame{
+ FrameHeader: fh,
+ Increment: inc,
+ }, nil
+}
+
+// WriteWindowUpdate writes a WINDOW_UPDATE frame.
+// The increment value must be between 1 and 2,147,483,647, inclusive.
+// If the Stream ID is zero, the window update applies to the
+// connection as a whole.
+func (f *Framer) WriteWindowUpdate(streamID, incr uint32) error {
+ // "The legal range for the increment to the flow control window is 1 to 2^31-1 (2,147,483,647) octets."
+ if (incr < 1 || incr > 2147483647) && !f.AllowIllegalWrites {
+ return errors.New("illegal window increment value")
+ }
+ f.startWrite(FrameWindowUpdate, 0, streamID)
+ f.writeUint32(incr)
+ return f.endWrite()
+}
+
+// A HeadersFrame is used to open a stream and additionally carries a
+// header block fragment.
+type HeadersFrame struct {
+ FrameHeader
+
+ // Priority is set if FlagHeadersPriority is set in the FrameHeader.
+ Priority PriorityParam
+
+ headerFragBuf []byte // not owned
+}
+
+func (f *HeadersFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *HeadersFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersEndHeaders)
+}
+
+func (f *HeadersFrame) StreamEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersEndStream)
+}
+
+func (f *HeadersFrame) HasPriority() bool {
+ return f.FrameHeader.Flags.Has(FlagHeadersPriority)
+}
+
+func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) {
+ hf := &HeadersFrame{
+ FrameHeader: fh,
+ }
+ if fh.StreamID == 0 {
+ // HEADERS frames MUST be associated with a stream. If a HEADERS frame
+ // is received whose stream identifier field is 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
+ }
+ var padLength uint8
+ if fh.Flags.Has(FlagHeadersPadded) {
+ if p, padLength, err = readByte(p); err != nil {
+ return
+ }
+ }
+ if fh.Flags.Has(FlagHeadersPriority) {
+ var v uint32
+ p, v, err = readUint32(p)
+ if err != nil {
+ return nil, err
+ }
+ hf.Priority.StreamDep = v & 0x7fffffff
+ hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
+ p, hf.Priority.Weight, err = readByte(p)
+ if err != nil {
+ return nil, err
+ }
+ }
+ if len(p)-int(padLength) <= 0 {
+ return nil, StreamError{fh.StreamID, ErrCodeProtocol}
+ }
+ hf.headerFragBuf = p[:len(p)-int(padLength)]
+ return hf, nil
+}
+
+// HeadersFrameParam are the parameters for writing a HEADERS frame.
+type HeadersFrameParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndStream indicates that the header block is the last that
+ // the endpoint will send for the identified stream. Setting
+ // this flag causes the stream to enter one of "half closed"
+ // states.
+ EndStream bool
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+
+ // Priority, if non-zero, includes stream priority information
+ // in the HEADER frame.
+ Priority PriorityParam
+}
+
+// WriteHeaders writes a single HEADERS frame.
+//
+// This is a low-level header writing method. Encoding headers and
+// splitting them into any necessary CONTINUATION frames is handled
+// elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteHeaders(p HeadersFrameParam) error {
+ if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if p.PadLength != 0 {
+ flags |= FlagHeadersPadded
+ }
+ if p.EndStream {
+ flags |= FlagHeadersEndStream
+ }
+ if p.EndHeaders {
+ flags |= FlagHeadersEndHeaders
+ }
+ if !p.Priority.IsZero() {
+ flags |= FlagHeadersPriority
+ }
+ f.startWrite(FrameHeaders, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !p.Priority.IsZero() {
+ v := p.Priority.StreamDep
+ if !validStreamIDOrZero(v) && !f.AllowIllegalWrites {
+ return errDepStreamID
+ }
+ if p.Priority.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Priority.Weight)
+ }
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
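HEADERS payloads must already be HPACK-encoded; a sketch pairing WriteHeaders with the encoder from the sibling hpack package (the header fields are illustrative):

    package main

    import (
        "bytes"
        "fmt"
        "log"

        "golang.org/x/net/http2"
        "golang.org/x/net/http2/hpack"
    )

    func main() {
        // Encode a header block fragment with HPACK.
        var hbuf bytes.Buffer
        enc := hpack.NewEncoder(&hbuf)
        enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"})
        enc.WriteField(hpack.HeaderField{Name: ":path", Value: "/"})

        var buf bytes.Buffer
        fr := http2.NewFramer(&buf, &buf)
        err := fr.WriteHeaders(http2.HeadersFrameParam{
            StreamID:      1,
            BlockFragment: hbuf.Bytes(),
            EndStream:     true,
            EndHeaders:    true,
        })
        if err != nil {
            log.Fatal(err)
        }
        f, err := fr.ReadFrame()
        if err != nil {
            log.Fatal(err)
        }
        hf := f.(*http2.HeadersFrame)
        fmt.Println(hf.HeadersEnded(), len(hf.HeaderBlockFragment()))
    }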
+
+// A PriorityFrame specifies the sender-advised priority of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.3
+type PriorityFrame struct {
+ FrameHeader
+ PriorityParam
+}
+
+// PriorityParam are the stream prioritization parameters.
+type PriorityParam struct {
+ // StreamDep is a 31-bit stream identifier for the
+ // stream that this stream depends on. Zero means no
+ // dependency.
+ StreamDep uint32
+
+ // Exclusive is whether the dependency is exclusive.
+ Exclusive bool
+
+ // Weight is the stream's zero-indexed weight. It should be
+ // set together with StreamDep, or neither should be set. Per
+ // the spec, "Add one to the value to obtain a weight between
+ // 1 and 256."
+ Weight uint8
+}
+
+func (p PriorityParam) IsZero() bool {
+ return p == PriorityParam{}
+}
+
+func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
+ }
+ if len(payload) != 5 {
+ return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
+ }
+ v := binary.BigEndian.Uint32(payload[:4])
+ streamID := v & 0x7fffffff // mask off high bit
+ return &PriorityFrame{
+ FrameHeader: fh,
+ PriorityParam: PriorityParam{
+ Weight: payload[4],
+ StreamDep: streamID,
+ Exclusive: streamID != v, // was high bit set?
+ },
+ }, nil
+}
+
+// WritePriority writes a PRIORITY frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePriority(streamID uint32, p PriorityParam) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ if !validStreamIDOrZero(p.StreamDep) {
+ return errDepStreamID
+ }
+ f.startWrite(FramePriority, 0, streamID)
+ v := p.StreamDep
+ if p.Exclusive {
+ v |= 1 << 31
+ }
+ f.writeUint32(v)
+ f.writeByte(p.Weight)
+ return f.endWrite()
+}
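+
+// Editorial note (not in the upstream source): Weight carries the wire
+// value, so the effective weight is Weight+1. For example, with fr a
+// *Framer, making stream 5 depend on stream 3 with effective weight 16:
+//
+//	err := fr.WritePriority(5, PriorityParam{
+//		StreamDep: 3,
+//		Weight:    15, // 15 on the wire means effective weight 16
+//	})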
+
+// A RSTStreamFrame allows for abnormal termination of a stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.4
+type RSTStreamFrame struct {
+ FrameHeader
+ ErrCode ErrCode
+}
+
+func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if len(p) != 4 {
+ return nil, ConnectionError(ErrCodeFrameSize)
+ }
+ if fh.StreamID == 0 {
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
+}
+
+// WriteRSTStream writes a RST_STREAM frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteRSTStream(streamID uint32, code ErrCode) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.startWrite(FrameRSTStream, 0, streamID)
+ f.writeUint32(uint32(code))
+ return f.endWrite()
+}
+
+// A ContinuationFrame is used to continue a sequence of header block fragments.
+// See http://http2.github.io/http2-spec/#rfc.section.6.10
+type ContinuationFrame struct {
+ FrameHeader
+ headerFragBuf []byte
+}
+
+func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) {
+ if fh.StreamID == 0 {
+ return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
+ }
+ return &ContinuationFrame{fh, p}, nil
+}
+
+func (f *ContinuationFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *ContinuationFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagContinuationEndHeaders)
+}
+
+// WriteContinuation writes a CONTINUATION frame.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WriteContinuation(streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+ if !validStreamID(streamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if endHeaders {
+ flags |= FlagContinuationEndHeaders
+ }
+ f.startWrite(FrameContinuation, flags, streamID)
+ f.wbuf = append(f.wbuf, headerBlockFragment...)
+ return f.endWrite()
+}
+
+// A PushPromiseFrame is used to initiate a server stream.
+// See http://http2.github.io/http2-spec/#rfc.section.6.6
+type PushPromiseFrame struct {
+ FrameHeader
+ PromiseID uint32
+ headerFragBuf []byte // not owned
+}
+
+func (f *PushPromiseFrame) HeaderBlockFragment() []byte {
+ f.checkValid()
+ return f.headerFragBuf
+}
+
+func (f *PushPromiseFrame) HeadersEnded() bool {
+ return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
+}
+
+func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) {
+ pp := &PushPromiseFrame{
+ FrameHeader: fh,
+ }
+ if pp.StreamID == 0 {
+ // PUSH_PROMISE frames MUST be associated with an existing,
+ // peer-initiated stream. The stream identifier of a
+ // PUSH_PROMISE frame indicates the stream it is associated
+ // with. If the stream identifier field specifies the value
+ // 0x0, a recipient MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ // The PUSH_PROMISE frame includes optional padding.
+ // Padding fields and flags are identical to those defined for DATA frames.
+ var padLength uint8
+ if fh.Flags.Has(FlagPushPromisePadded) {
+ if p, padLength, err = readByte(p); err != nil {
+ return
+ }
+ }
+
+ p, pp.PromiseID, err = readUint32(p)
+ if err != nil {
+ return
+ }
+ pp.PromiseID = pp.PromiseID & (1<<31 - 1)
+
+ if int(padLength) > len(p) {
+ // like the DATA frame, error out if padding is longer than the body.
+ return nil, ConnectionError(ErrCodeProtocol)
+ }
+ pp.headerFragBuf = p[:len(p)-int(padLength)]
+ return pp, nil
+}
+
+// PushPromiseParam are the parameters for writing a PUSH_PROMISE frame.
+type PushPromiseParam struct {
+ // StreamID is the required Stream ID to initiate.
+ StreamID uint32
+
+ // PromiseID is the required Stream ID of the stream that this
+ // frame promises to create.
+ PromiseID uint32
+
+ // BlockFragment is part (or all) of a Header Block.
+ BlockFragment []byte
+
+ // EndHeaders indicates that this frame contains an entire
+ // header block and is not followed by any
+ // CONTINUATION frames.
+ EndHeaders bool
+
+ // PadLength is the optional number of bytes of zeros to add
+ // to this frame.
+ PadLength uint8
+}
+
+// WritePushPromise writes a single PUSH_PROMISE frame.
+//
+// As with HEADERS frames, this is a low-level call for writing
+// individual frames. CONTINUATION frames are handled elsewhere.
+//
+// It will perform exactly one Write to the underlying Writer.
+// It is the caller's responsibility to not call other Write methods concurrently.
+func (f *Framer) WritePushPromise(p PushPromiseParam) error {
+ if !validStreamID(p.StreamID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ var flags Flags
+ if p.PadLength != 0 {
+ flags |= FlagPushPromisePadded
+ }
+ if p.EndHeaders {
+ flags |= FlagPushPromiseEndHeaders
+ }
+ f.startWrite(FramePushPromise, flags, p.StreamID)
+ if p.PadLength != 0 {
+ f.writeByte(p.PadLength)
+ }
+ if !validStreamID(p.PromiseID) && !f.AllowIllegalWrites {
+ return errStreamID
+ }
+ f.writeUint32(p.PromiseID)
+ f.wbuf = append(f.wbuf, p.BlockFragment...)
+ f.wbuf = append(f.wbuf, padZeros[:p.PadLength]...)
+ return f.endWrite()
+}
+
+// WriteRawFrame writes a raw frame. This can be used to write
+// extension frames unknown to this package.
+func (f *Framer) WriteRawFrame(t FrameType, flags Flags, streamID uint32, payload []byte) error {
+ f.startWrite(t, flags, streamID)
+ f.writeBytes(payload)
+ return f.endWrite()
+}
+
+func readByte(p []byte) (remain []byte, b byte, err error) {
+ if len(p) == 0 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[1:], p[0], nil
+}
+
+func readUint32(p []byte) (remain []byte, v uint32, err error) {
+ if len(p) < 4 {
+ return nil, 0, io.ErrUnexpectedEOF
+ }
+ return p[4:], binary.BigEndian.Uint32(p[:4]), nil
+}
+
+type streamEnder interface {
+ StreamEnded() bool
+}
+
+type headersEnder interface {
+ HeadersEnded() bool
+}
+
+type headersOrContinuation interface {
+ headersEnder
+ HeaderBlockFragment() []byte
+}
+
+// A MetaHeadersFrame is the representation of one HEADERS frame and
+// zero or more contiguous CONTINUATION frames and the decoding of
+// their HPACK-encoded contents.
+//
+// This type of frame does not appear on the wire and is only returned
+// by the Framer when Framer.ReadMetaHeaders is set.
+type MetaHeadersFrame struct {
+ *HeadersFrame
+
+ // Fields are the fields contained in the HEADERS and
+ // CONTINUATION frames. The underlying slice is owned by the
+ // Framer and must not be retained after the next call to
+ // ReadFrame.
+ //
+ // Fields are guaranteed to be in the correct http2 order and
+ // not have unknown pseudo header fields or invalid header
+ // field names or values. Required pseudo header fields may be
+ // missing, however. Use the MetaHeadersFrame.PseudoValue
+ // accessor method to access pseudo headers.
+ Fields []hpack.HeaderField
+
+ // Truncated is whether the max header list size limit was hit
+ // and Fields is incomplete. The hpack decoder state is still
+ // valid, however.
+ Truncated bool
+}
+
+// PseudoValue returns the given pseudo header field's value.
+// The provided pseudo field should not contain the leading colon.
+func (mh *MetaHeadersFrame) PseudoValue(pseudo string) string {
+ for _, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return ""
+ }
+ if hf.Name[1:] == pseudo {
+ return hf.Value
+ }
+ }
+ return ""
+}
+
+// RegularFields returns the regular (non-pseudo) header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) RegularFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[i:]
+ }
+ }
+ return nil
+}
+
+// PseudoFields returns the pseudo header fields of mh.
+// The caller does not own the returned slice.
+func (mh *MetaHeadersFrame) PseudoFields() []hpack.HeaderField {
+ for i, hf := range mh.Fields {
+ if !hf.IsPseudo() {
+ return mh.Fields[:i]
+ }
+ }
+ return mh.Fields
+}
+
+func (mh *MetaHeadersFrame) checkPseudos() error {
+ var isRequest, isResponse bool
+ pf := mh.PseudoFields()
+ for i, hf := range pf {
+ switch hf.Name {
+ case ":method", ":path", ":scheme", ":authority":
+ isRequest = true
+ case ":status":
+ isResponse = true
+ default:
+ return pseudoHeaderError(hf.Name)
+ }
+ // Check for duplicates.
+ // This would be a bad algorithm, but N is 4.
+ // And this doesn't allocate.
+ for _, hf2 := range pf[:i] {
+ if hf.Name == hf2.Name {
+ return duplicatePseudoHeaderError(hf.Name)
+ }
+ }
+ }
+ if isRequest && isResponse {
+ return errMixPseudoHeaderTypes
+ }
+ return nil
+}
+
+func (fr *Framer) maxHeaderStringLen() int {
+ v := fr.maxHeaderListSize()
+ if uint32(int(v)) == v {
+ return int(v)
+ }
+ // They had a crazy big number for MaxHeaderBytes anyway,
+ // so give them unlimited header lengths:
+ return 0
+}
+
+// readMetaFrame reads zero or more CONTINUATION frames from fr,
+// merges them into the provided hf, and returns a MetaHeadersFrame
+// with the decoded hpack values.
+func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
+ if fr.AllowIllegalReads {
+ return nil, errors.New("illegal use of AllowIllegalReads with ReadMetaHeaders")
+ }
+ mh := &MetaHeadersFrame{
+ HeadersFrame: hf,
+ }
+ var remainSize = fr.maxHeaderListSize()
+ var sawRegular bool
+
+ var invalid error // pseudo header field errors
+ hdec := fr.ReadMetaHeaders
+ hdec.SetEmitEnabled(true)
+ hdec.SetMaxStringLength(fr.maxHeaderStringLen())
+ hdec.SetEmitFunc(func(hf hpack.HeaderField) {
+ if !httplex.ValidHeaderFieldValue(hf.Value) {
+ invalid = headerFieldValueError(hf.Value)
+ }
+ isPseudo := strings.HasPrefix(hf.Name, ":")
+ if isPseudo {
+ if sawRegular {
+ invalid = errPseudoAfterRegular
+ }
+ } else {
+ sawRegular = true
+ if !validWireHeaderFieldName(hf.Name) {
+ invalid = headerFieldNameError(hf.Name)
+ }
+ }
+
+ if invalid != nil {
+ hdec.SetEmitEnabled(false)
+ return
+ }
+
+ size := hf.Size()
+ if size > remainSize {
+ hdec.SetEmitEnabled(false)
+ mh.Truncated = true
+ return
+ }
+ remainSize -= size
+
+ mh.Fields = append(mh.Fields, hf)
+ })
+ // Lose reference to MetaHeadersFrame:
+ defer hdec.SetEmitFunc(func(hf hpack.HeaderField) {})
+
+ var hc headersOrContinuation = hf
+ for {
+ frag := hc.HeaderBlockFragment()
+ if _, err := hdec.Write(frag); err != nil {
+ return nil, ConnectionError(ErrCodeCompression)
+ }
+
+ if hc.HeadersEnded() {
+ break
+ }
+ if f, err := fr.ReadFrame(); err != nil {
+ return nil, err
+ } else {
+ hc = f.(*ContinuationFrame) // guaranteed by checkFrameOrder
+ }
+ }
+
+ mh.HeadersFrame.headerFragBuf = nil
+ mh.HeadersFrame.invalidate()
+
+ if err := hdec.Close(); err != nil {
+ return nil, ConnectionError(ErrCodeCompression)
+ }
+ if invalid != nil {
+ fr.errDetail = invalid
+ return nil, StreamError{mh.StreamID, ErrCodeProtocol}
+ }
+ if err := mh.checkPseudos(); err != nil {
+ fr.errDetail = err
+ return nil, StreamError{mh.StreamID, ErrCodeProtocol}
+ }
+ return mh, nil
+}
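+
+// Editorial sketch (not part of the upstream source): setting
+// ReadMetaHeaders makes ReadFrame return a *MetaHeadersFrame in place
+// of raw HEADERS and CONTINUATION frames. Here conn stands in for the
+// peer's io.Reader:
+//
+//	fr := NewFramer(nil, conn)
+//	fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+//	f, err := fr.ReadFrame()
+//	if err != nil {
+//		// handle error
+//	}
+//	if mh, ok := f.(*MetaHeadersFrame); ok {
+//		_ = mh.PseudoValue("path") // e.g. "/index.html"
+//	}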
+
+func summarizeFrame(f Frame) string {
+ var buf bytes.Buffer
+ f.Header().writeDebug(&buf)
+ switch f := f.(type) {
+ case *SettingsFrame:
+ n := 0
+ f.ForeachSetting(func(s Setting) error {
+ n++
+ if n == 1 {
+ buf.WriteString(", settings:")
+ }
+ fmt.Fprintf(&buf, " %v=%v,", s.ID, s.Val)
+ return nil
+ })
+ if n > 0 {
+ buf.Truncate(buf.Len() - 1) // remove trailing comma
+ }
+ case *DataFrame:
+ data := f.Data()
+ const max = 256
+ if len(data) > max {
+ data = data[:max]
+ }
+ fmt.Fprintf(&buf, " data=%q", data)
+ if len(f.Data()) > max {
+ fmt.Fprintf(&buf, " (%d bytes omitted)", len(f.Data())-max)
+ }
+ case *WindowUpdateFrame:
+ if f.StreamID == 0 {
+ buf.WriteString(" (conn)")
+ }
+ fmt.Fprintf(&buf, " incr=%v", f.Increment)
+ case *PingFrame:
+ fmt.Fprintf(&buf, " ping=%q", f.Data[:])
+ case *GoAwayFrame:
+ fmt.Fprintf(&buf, " LastStreamID=%v ErrCode=%v Debug=%q",
+ f.LastStreamID, f.ErrCode, f.debugData)
+ case *RSTStreamFrame:
+ fmt.Fprintf(&buf, " ErrCode=%v", f.ErrCode)
+ }
+ return buf.String()
+}
diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go
new file mode 100644
index 0000000..00b2e9e
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go16.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.6
+
+package http2
+
+import (
+ "net/http"
+ "time"
+)
+
+func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
+ return t1.ExpectContinueTimeout
+}
diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go
new file mode 100644
index 0000000..730319d
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/go17.go
@@ -0,0 +1,94 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.7
+
+package http2
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "net/http/httptrace"
+ "time"
+)
+
+type contextContext interface {
+ context.Context
+}
+
+func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
+ ctx, cancel = context.WithCancel(context.Background())
+ ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
+ if hs := opts.baseConfig(); hs != nil {
+ ctx = context.WithValue(ctx, http.ServerContextKey, hs)
+ }
+ return
+}
+
+func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
+ return context.WithCancel(ctx)
+}
+
+func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
+ return req.WithContext(ctx)
+}
+
+type clientTrace httptrace.ClientTrace
+
+func reqContext(r *http.Request) context.Context { return r.Context() }
+
+func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }
+
+func traceGotConn(req *http.Request, cc *ClientConn) {
+ trace := httptrace.ContextClientTrace(req.Context())
+ if trace == nil || trace.GotConn == nil {
+ return
+ }
+ ci := httptrace.GotConnInfo{Conn: cc.tconn}
+ cc.mu.Lock()
+ ci.Reused = cc.nextStreamID > 1
+ ci.WasIdle = len(cc.streams) == 0 && ci.Reused
+ if ci.WasIdle && !cc.lastActive.IsZero() {
+ ci.IdleTime = time.Since(cc.lastActive)
+ }
+ cc.mu.Unlock()
+
+ trace.GotConn(ci)
+}
+
+func traceWroteHeaders(trace *clientTrace) {
+ if trace != nil && trace.WroteHeaders != nil {
+ trace.WroteHeaders()
+ }
+}
+
+func traceGot100Continue(trace *clientTrace) {
+ if trace != nil && trace.Got100Continue != nil {
+ trace.Got100Continue()
+ }
+}
+
+func traceWait100Continue(trace *clientTrace) {
+ if trace != nil && trace.Wait100Continue != nil {
+ trace.Wait100Continue()
+ }
+}
+
+func traceWroteRequest(trace *clientTrace, err error) {
+ if trace != nil && trace.WroteRequest != nil {
+ trace.WroteRequest(httptrace.WroteRequestInfo{Err: err})
+ }
+}
+
+func traceFirstResponseByte(trace *clientTrace) {
+ if trace != nil && trace.GotFirstResponseByte != nil {
+ trace.GotFirstResponseByte()
+ }
+}
+
+func requestTrace(req *http.Request) *clientTrace {
+ trace := httptrace.ContextClientTrace(req.Context())
+ return (*clientTrace)(trace)
+}
diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go
new file mode 100644
index 0000000..9933c9f
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/gotrack.go
@@ -0,0 +1,170 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Defensive debug-only utility to track that functions run on the
+// goroutine that they're supposed to.
+
+package http2
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "os"
+ "runtime"
+ "strconv"
+ "sync"
+)
+
+// DebugGoroutines controls the debug-only goroutine-ownership checks
+// in this package. It is enabled by setting the
+// DEBUG_HTTP2_GOROUTINES environment variable to "1".
+var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
+
+type goroutineLock uint64
+
+func newGoroutineLock() goroutineLock {
+ if !DebugGoroutines {
+ return 0
+ }
+ return goroutineLock(curGoroutineID())
+}
+
+func (g goroutineLock) check() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() != uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+func (g goroutineLock) checkNotOn() {
+ if !DebugGoroutines {
+ return
+ }
+ if curGoroutineID() == uint64(g) {
+ panic("running on the wrong goroutine")
+ }
+}
+
+var goroutineSpace = []byte("goroutine ")
+
+func curGoroutineID() uint64 {
+ bp := littleBuf.Get().(*[]byte)
+ defer littleBuf.Put(bp)
+ b := *bp
+ b = b[:runtime.Stack(b, false)]
+ // Parse the 4707 out of "goroutine 4707 ["
+ b = bytes.TrimPrefix(b, goroutineSpace)
+ i := bytes.IndexByte(b, ' ')
+ if i < 0 {
+ panic(fmt.Sprintf("No space found in %q", b))
+ }
+ b = b[:i]
+ n, err := parseUintBytes(b, 10, 64)
+ if err != nil {
+ panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
+ }
+ return n
+}
+
+var littleBuf = sync.Pool{
+ New: func() interface{} {
+ buf := make([]byte, 64)
+ return &buf
+ },
+}
+
+// parseUintBytes is like strconv.ParseUint, but using a []byte.
+func parseUintBytes(s []byte, base int, bitSize int) (n uint64, err error) {
+ var cutoff, maxVal uint64
+
+ if bitSize == 0 {
+ bitSize = int(strconv.IntSize)
+ }
+
+ s0 := s
+ switch {
+ case len(s) < 1:
+ err = strconv.ErrSyntax
+ goto Error
+
+ case 2 <= base && base <= 36:
+ // valid base; nothing to do
+
+ case base == 0:
+ // Look for octal, hex prefix.
+ switch {
+ case s[0] == '0' && len(s) > 1 && (s[1] == 'x' || s[1] == 'X'):
+ base = 16
+ s = s[2:]
+ if len(s) < 1 {
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ case s[0] == '0':
+ base = 8
+ default:
+ base = 10
+ }
+
+ default:
+ err = errors.New("invalid base " + strconv.Itoa(base))
+ goto Error
+ }
+
+ n = 0
+ cutoff = cutoff64(base)
+ maxVal = 1<<uint(bitSize) - 1
+
+ for i := 0; i < len(s); i++ {
+ var v byte
+ d := s[i]
+ switch {
+ case '0' <= d && d <= '9':
+ v = d - '0'
+ case 'a' <= d && d <= 'z':
+ v = d - 'a' + 10
+ case 'A' <= d && d <= 'Z':
+ v = d - 'A' + 10
+ default:
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+ if int(v) >= base {
+ n = 0
+ err = strconv.ErrSyntax
+ goto Error
+ }
+
+ if n >= cutoff {
+ // n*base overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n *= uint64(base)
+
+ n1 := n + uint64(v)
+ if n1 < n || n1 > maxVal {
+ // n+v overflows
+ n = 1<<64 - 1
+ err = strconv.ErrRange
+ goto Error
+ }
+ n = n1
+ }
+
+ return n, nil
+
+Error:
+ return n, &strconv.NumError{Func: "ParseUint", Num: string(s0), Err: err}
+}
+
+// cutoff64 returns the first number n such that n*base >= 1<<64.
+func cutoff64(base int) uint64 {
+ if base < 2 {
+ return 0
+ }
+ return (1<<64-1)/uint64(base) + 1
+}
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go
new file mode 100644
index 0000000..c2805f6
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/headermap.go
@@ -0,0 +1,78 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "net/http"
+ "strings"
+)
+
+var (
+ commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case
+ commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case
+)
+
+func init() {
+ for _, v := range []string{
+ "accept",
+ "accept-charset",
+ "accept-encoding",
+ "accept-language",
+ "accept-ranges",
+ "age",
+ "access-control-allow-origin",
+ "allow",
+ "authorization",
+ "cache-control",
+ "content-disposition",
+ "content-encoding",
+ "content-language",
+ "content-length",
+ "content-location",
+ "content-range",
+ "content-type",
+ "cookie",
+ "date",
+ "etag",
+ "expect",
+ "expires",
+ "from",
+ "host",
+ "if-match",
+ "if-modified-since",
+ "if-none-match",
+ "if-unmodified-since",
+ "last-modified",
+ "link",
+ "location",
+ "max-forwards",
+ "proxy-authenticate",
+ "proxy-authorization",
+ "range",
+ "referer",
+ "refresh",
+ "retry-after",
+ "server",
+ "set-cookie",
+ "strict-transport-security",
+ "trailer",
+ "transfer-encoding",
+ "user-agent",
+ "vary",
+ "via",
+ "www-authenticate",
+ } {
+ chk := http.CanonicalHeaderKey(v)
+ commonLowerHeader[chk] = v
+ commonCanonHeader[v] = chk
+ }
+}
+
+func lowerHeader(v string) string {
+ if s, ok := commonLowerHeader[v]; ok {
+ return s
+ }
+ return strings.ToLower(v)
+}
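+
+// Editorial note (not in the upstream source): common keys hit the
+// table and anything else falls back to strings.ToLower, e.g.
+// lowerHeader("Content-Type") == "content-type" and
+// lowerHeader("X-Custom-Hdr") == "x-custom-hdr".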
diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go
new file mode 100644
index 0000000..f9bb033
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/encode.go
@@ -0,0 +1,251 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "io"
+)
+
+const (
+ uint32Max = ^uint32(0)
+ initialHeaderTableSize = 4096
+)
+
+// An Encoder performs HPACK encoding of header fields, writing the
+// encoded bytes to an underlying io.Writer.
+type Encoder struct {
+ dynTab dynamicTable
+ // minSize is the minimum table size set by
+ // SetMaxDynamicTableSize after the previous Header Table Size
+ // Update.
+ minSize uint32
+ // maxSizeLimit is the maximum table size this encoder
+ // supports. This will protect the encoder from too large
+ // size.
+ maxSizeLimit uint32
+ // tableSizeUpdate indicates whether "Header Table Size
+ // Update" is required.
+ tableSizeUpdate bool
+ w io.Writer
+ buf []byte
+}
+
+// NewEncoder returns a new Encoder which performs HPACK encoding.
+// Encoded data is written to w.
+func NewEncoder(w io.Writer) *Encoder {
+ e := &Encoder{
+ minSize: uint32Max,
+ maxSizeLimit: initialHeaderTableSize,
+ tableSizeUpdate: false,
+ w: w,
+ }
+ e.dynTab.setMaxSize(initialHeaderTableSize)
+ return e
+}
+
+// WriteField encodes f into a single Write to e's underlying Writer.
+// This function may also produce bytes for "Header Table Size Update"
+// if necessary. If produced, it is done before encoding f.
+func (e *Encoder) WriteField(f HeaderField) error {
+ e.buf = e.buf[:0]
+
+ if e.tableSizeUpdate {
+ e.tableSizeUpdate = false
+ if e.minSize < e.dynTab.maxSize {
+ e.buf = appendTableSize(e.buf, e.minSize)
+ }
+ e.minSize = uint32Max
+ e.buf = appendTableSize(e.buf, e.dynTab.maxSize)
+ }
+
+ idx, nameValueMatch := e.searchTable(f)
+ if nameValueMatch {
+ e.buf = appendIndexed(e.buf, idx)
+ } else {
+ indexing := e.shouldIndex(f)
+ if indexing {
+ e.dynTab.add(f)
+ }
+
+ if idx == 0 {
+ e.buf = appendNewName(e.buf, f, indexing)
+ } else {
+ e.buf = appendIndexedName(e.buf, f, idx, indexing)
+ }
+ }
+ n, err := e.w.Write(e.buf)
+ if err == nil && n != len(e.buf) {
+ err = io.ErrShortWrite
+ }
+ return err
+}
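+
+// Editorial sketch (not part of the upstream source): typical
+// WriteField use, appending each field's encoding to buf. On a fresh
+// encoder, ":method: GET" is static table index 2, so buf ends up
+// holding the single byte 0x82 (Indexed Header Field representation):
+//
+//	var buf bytes.Buffer
+//	e := NewEncoder(&buf)
+//	err := e.WriteField(HeaderField{Name: ":method", Value: "GET"})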
+
+// searchTable searches f in both static and dynamic header tables.
+// The static header table is searched first. Only when there is no
+// exact match for both name and value, the dynamic header table is
+// then searched. If there is no match, i is 0. If both name and value
+// match, i is the matched index and nameValueMatch becomes true. If
+// only name matches, i points to that index and nameValueMatch
+// becomes false.
+func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) {
+ for idx, hf := range staticTable {
+ if !constantTimeStringCompare(hf.Name, f.Name) {
+ continue
+ }
+ if i == 0 {
+ i = uint64(idx + 1)
+ }
+ if f.Sensitive {
+ continue
+ }
+ if !constantTimeStringCompare(hf.Value, f.Value) {
+ continue
+ }
+ i = uint64(idx + 1)
+ nameValueMatch = true
+ return
+ }
+
+ j, nameValueMatch := e.dynTab.search(f)
+ if nameValueMatch || (i == 0 && j != 0) {
+ i = j + uint64(len(staticTable))
+ }
+ return
+}
+
+// SetMaxDynamicTableSize changes the dynamic header table size to v.
+// The actual size is bounded by the value passed to
+// SetMaxDynamicTableSizeLimit.
+func (e *Encoder) SetMaxDynamicTableSize(v uint32) {
+ if v > e.maxSizeLimit {
+ v = e.maxSizeLimit
+ }
+ if v < e.minSize {
+ e.minSize = v
+ }
+ e.tableSizeUpdate = true
+ e.dynTab.setMaxSize(v)
+}
+
+// SetMaxDynamicTableSizeLimit changes the maximum value that can be
+// specified in SetMaxDynamicTableSize to v. By default, it is set to
+// 4096, the default dynamic header table size described in the
+// HPACK specification. If the current maximum
+// dynamic header table size is strictly greater than v, "Header Table
+// Size Update" will be done in the next WriteField call and the
+// maximum dynamic header table size is truncated to v.
+func (e *Encoder) SetMaxDynamicTableSizeLimit(v uint32) {
+ e.maxSizeLimit = v
+ if e.dynTab.maxSize > v {
+ e.tableSizeUpdate = true
+ e.dynTab.setMaxSize(v)
+ }
+}
+
+// shouldIndex reports whether f should be indexed.
+func (e *Encoder) shouldIndex(f HeaderField) bool {
+ return !f.Sensitive && f.Size() <= e.dynTab.maxSize
+}
+
+// appendIndexed appends index i, as encoded in "Indexed Header Field"
+// representation, to dst and returns the extended buffer.
+func appendIndexed(dst []byte, i uint64) []byte {
+ first := len(dst)
+ dst = appendVarInt(dst, 7, i)
+ dst[first] |= 0x80
+ return dst
+}
+
+// appendNewName appends f, as encoded in one of "Literal Header field
+// - New Name" representation variants, to dst and returns the
+// extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendNewName(dst []byte, f HeaderField, indexing bool) []byte {
+ dst = append(dst, encodeTypeByte(indexing, f.Sensitive))
+ dst = appendHpackString(dst, f.Name)
+ return appendHpackString(dst, f.Value)
+}
+
+// appendIndexedName appends f and index i referring indexed name
+// entry, as encoded in one of "Literal Header field - Indexed Name"
+// representation variants, to dst and returns the extended buffer.
+//
+// If f.Sensitive is true, "Never Indexed" representation is used. If
+// f.Sensitive is false and indexing is true, "Incremental Indexing"
+// representation is used.
+func appendIndexedName(dst []byte, f HeaderField, i uint64, indexing bool) []byte {
+ first := len(dst)
+ var n byte
+ if indexing {
+ n = 6
+ } else {
+ n = 4
+ }
+ dst = appendVarInt(dst, n, i)
+ dst[first] |= encodeTypeByte(indexing, f.Sensitive)
+ return appendHpackString(dst, f.Value)
+}
+
+// appendTableSize appends v, as encoded in "Header Table Size Update"
+// representation, to dst and returns the extended buffer.
+func appendTableSize(dst []byte, v uint32) []byte {
+ first := len(dst)
+ dst = appendVarInt(dst, 5, uint64(v))
+ dst[first] |= 0x20
+ return dst
+}
+
+// appendVarInt appends i, as encoded in variable integer form using n
+// bit prefix, to dst and returns the extended buffer.
+//
+// See
+// http://http2.github.io/http2-spec/compression.html#integer.representation
+func appendVarInt(dst []byte, n byte, i uint64) []byte {
+ k := uint64((1 << n) - 1)
+ if i < k {
+ return append(dst, byte(i))
+ }
+ dst = append(dst, byte(k))
+ i -= k
+ for ; i >= 128; i >>= 7 {
+ dst = append(dst, byte(0x80|(i&0x7f)))
+ }
+ return append(dst, byte(i))
+}
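+
+// Editorial worked example (not in the upstream source), matching RFC
+// 7541 Appendix C.1.2: encoding 1337 with a 5-bit prefix. 1337 exceeds
+// the prefix maximum of 31, so the prefix is filled with 31 and
+// 1337-31 = 1306 is emitted 7 bits at a time, least significant first:
+//
+//	appendVarInt(nil, 5, 1337) // -> [0x1f, 0x9a, 0x0a]
+//	// 0x1f = 31; 0x9a = 0x80|(1306&0x7f) = 128|26; 0x0a = 1306>>7 = 10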
+
+// appendHpackString appends s, as encoded in "String Literal"
+// representation, to dst and returns the extended buffer.
+//
+// s will be encoded in Huffman codes only when that produces a
+// strictly shorter byte string.
+func appendHpackString(dst []byte, s string) []byte {
+ huffmanLength := HuffmanEncodeLength(s)
+ if huffmanLength < uint64(len(s)) {
+ first := len(dst)
+ dst = appendVarInt(dst, 7, huffmanLength)
+ dst = AppendHuffmanString(dst, s)
+ dst[first] |= 0x80
+ } else {
+ dst = appendVarInt(dst, 7, uint64(len(s)))
+ dst = append(dst, s...)
+ }
+ return dst
+}
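+
+// Editorial note (not in the upstream source): per RFC 7541 Appendix
+// C.4.1, "www.example.com" is 15 raw bytes but only 12 bytes
+// Huffman-encoded, so appendHpackString picks the Huffman form (length
+// byte 0x8c = 0x80|12). A string like "!!!", whose 10-bit Huffman
+// codes total 30 bits (4 bytes) against 3 raw bytes, stays raw.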
+
+// encodeTypeByte returns type byte. If sensitive is true, type byte
+// for "Never Indexed" representation is returned. If sensitive is
+// false and indexing is true, type byte for "Incremental Indexing"
+// representation is returned. Otherwise, type byte for "Without
+// Indexing" is returned.
+func encodeTypeByte(indexing, sensitive bool) byte {
+ if sensitive {
+ return 0x10
+ }
+ if indexing {
+ return 0x40
+ }
+ return 0
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go
new file mode 100644
index 0000000..8aa197a
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/hpack.go
@@ -0,0 +1,542 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package hpack implements HPACK, a compression format for
+// efficiently representing HTTP header fields in the context of HTTP/2.
+//
+// See http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-09
+package hpack
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+)
+
+// A DecodingError is something the spec defines as a decoding error.
+type DecodingError struct {
+ Err error
+}
+
+func (de DecodingError) Error() string {
+ return fmt.Sprintf("decoding error: %v", de.Err)
+}
+
+// An InvalidIndexError is returned when an encoder references a table
+// entry before the static table or after the end of the dynamic table.
+type InvalidIndexError int
+
+func (e InvalidIndexError) Error() string {
+ return fmt.Sprintf("invalid indexed representation index %d", int(e))
+}
+
+// A HeaderField is a name-value pair. Both the name and value are
+// treated as opaque sequences of octets.
+type HeaderField struct {
+ Name, Value string
+
+ // Sensitive means that this header field should never be
+ // indexed.
+ Sensitive bool
+}
+
+// IsPseudo reports whether the header field is an http2 pseudo header.
+// That is, it reports whether it starts with a colon.
+// It is not otherwise guaranteed to be a valid pseudo header field,
+// though.
+func (hf HeaderField) IsPseudo() bool {
+ return len(hf.Name) != 0 && hf.Name[0] == ':'
+}
+
+func (hf HeaderField) String() string {
+ var suffix string
+ if hf.Sensitive {
+ suffix = " (sensitive)"
+ }
+ return fmt.Sprintf("header field %q = %q%s", hf.Name, hf.Value, suffix)
+}
+
+// Size returns the size of an entry per RFC 7541 section 4.1.
+func (hf HeaderField) Size() uint32 {
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1
+ // "The size of the dynamic table is the sum of the size of
+ // its entries. The size of an entry is the sum of its name's
+ // length in octets (as defined in Section 5.2), its value's
+ // length in octets (see Section 5.2), plus 32. The size of
+ // an entry is calculated using the length of the name and
+ // value without any Huffman encoding applied."
+
+ // This can overflow if somebody makes a large HeaderField
+ // Name and/or Value by hand, but we don't care, because that
+ // won't happen on the wire because the encoding doesn't allow
+ // it.
+ return uint32(len(hf.Name) + len(hf.Value) + 32)
+}
+
+// A Decoder is the decoding context for incremental processing of
+// header blocks.
+type Decoder struct {
+ dynTab dynamicTable
+ emit func(f HeaderField)
+
+ emitEnabled bool // whether calls to emit are enabled
+ maxStrLen int // 0 means unlimited
+
+ // buf is the unparsed buffer. It's only written to
+ // saveBuf if it was truncated in the middle of a header
+ // block. Because it's usually not owned, we can only
+ // process it under Write.
+ buf []byte // not owned; only valid during Write
+
+ // saveBuf is previous data passed to Write which we weren't able
+ // to fully parse before. Unlike buf, we own this data.
+ saveBuf bytes.Buffer
+}
+
+// NewDecoder returns a new decoder with the provided maximum dynamic
+// table size. The emitFunc will be called for each valid field
+// parsed, in the same goroutine as calls to Write, before Write returns.
+func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decoder {
+ d := &Decoder{
+ emit: emitFunc,
+ emitEnabled: true,
+ }
+ d.dynTab.allowedMaxSize = maxDynamicTableSize
+ d.dynTab.setMaxSize(maxDynamicTableSize)
+ return d
+}
+
+// ErrStringLength is returned by Decoder.Write when the max string length
+// (as configured by Decoder.SetMaxStringLength) would be violated.
+var ErrStringLength = errors.New("hpack: string too long")
+
+// SetMaxStringLength sets the maximum size of a HeaderField name or
+// value string. If a string exceeds this length (even after any
+// decompression), Write will return ErrStringLength.
+// A value of 0 means unlimited and is the default from NewDecoder.
+func (d *Decoder) SetMaxStringLength(n int) {
+ d.maxStrLen = n
+}
+
+// SetEmitFunc changes the callback used when new header fields
+// are decoded.
+// It must be non-nil. It does not affect EmitEnabled.
+func (d *Decoder) SetEmitFunc(emitFunc func(f HeaderField)) {
+ d.emit = emitFunc
+}
+
+// SetEmitEnabled controls whether the emitFunc provided to NewDecoder
+// should be called. The default is true.
+//
+// This facility exists to let servers enforce MAX_HEADER_LIST_SIZE
+// while still decoding and keeping in-sync with decoder state, but
+// without doing unnecessary decompression or generating unnecessary
+// garbage for header fields past the limit.
+func (d *Decoder) SetEmitEnabled(v bool) { d.emitEnabled = v }
+
+// EmitEnabled reports whether calls to the emitFunc provided to NewDecoder
+// are currently enabled. The default is true.
+func (d *Decoder) EmitEnabled() bool { return d.emitEnabled }
+
+// TODO: add method *Decoder.Reset(maxSize, emitFunc) to let callers re-use Decoders and their
+// underlying buffers for garbage reasons.
+
+// SetMaxDynamicTableSize changes the decoder's maximum dynamic
+// header table size to v, evicting entries as needed.
+func (d *Decoder) SetMaxDynamicTableSize(v uint32) {
+ d.dynTab.setMaxSize(v)
+}
+
+// SetAllowedMaxDynamicTableSize sets the upper bound that the encoded
+// stream (via dynamic table size updates) may set the maximum size
+// to.
+func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) {
+ d.dynTab.allowedMaxSize = v
+}
+
+type dynamicTable struct {
+ // ents is the FIFO described at
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2
+ // The newest (lowest index) is appended at the end, and items are
+ // evicted from the front.
+ ents []HeaderField
+ size uint32
+ maxSize uint32 // current maxSize
+ allowedMaxSize uint32 // maxSize may go up to this, inclusive
+}
+
+func (dt *dynamicTable) setMaxSize(v uint32) {
+ dt.maxSize = v
+ dt.evict()
+}
+
+// TODO: change dynamicTable to be a struct with a slice and a size int field,
+// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1:
+//
+// Then make add increment the size. Maybe the max size should move from
+// Decoder to dynamicTable and add should return an ok bool if there was
+// enough space.
+//
+// Later we'll need a remove operation on dynamicTable.
+
+func (dt *dynamicTable) add(f HeaderField) {
+ dt.ents = append(dt.ents, f)
+ dt.size += f.Size()
+ dt.evict()
+}
+
+// If we're too big, evict old stuff (front of the slice)
+func (dt *dynamicTable) evict() {
+ base := dt.ents // keep base pointer of slice
+ for dt.size > dt.maxSize {
+ dt.size -= dt.ents[0].Size()
+ dt.ents = dt.ents[1:]
+ }
+
+ // Shift slice contents down if we evicted things.
+ if len(dt.ents) != len(base) {
+ copy(base, dt.ents)
+ dt.ents = base[:len(dt.ents)]
+ }
+}
+
+// constantTimeStringCompare compares string a and b in a constant
+// time manner.
+func constantTimeStringCompare(a, b string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+
+ c := byte(0)
+
+ for i := 0; i < len(a); i++ {
+ c |= a[i] ^ b[i]
+ }
+
+ return c == 0
+}
+
+// search searches f in the table. The return value i is 0 if there is
+// no name match. If there is name match or name/value match, i is the
+// index of that entry (1-based). If both name and value match,
+// nameValueMatch becomes true.
+func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) {
+ l := len(dt.ents)
+ for j := l - 1; j >= 0; j-- {
+ ent := dt.ents[j]
+ if !constantTimeStringCompare(ent.Name, f.Name) {
+ continue
+ }
+ if i == 0 {
+ i = uint64(l - j)
+ }
+ if f.Sensitive {
+ continue
+ }
+ if !constantTimeStringCompare(ent.Value, f.Value) {
+ continue
+ }
+ i = uint64(l - j)
+ nameValueMatch = true
+ return
+ }
+ return
+}
+
+func (d *Decoder) maxTableIndex() int {
+ return len(d.dynTab.ents) + len(staticTable)
+}
+
+func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) {
+ if i < 1 {
+ return
+ }
+ if i > uint64(d.maxTableIndex()) {
+ return
+ }
+ if i <= uint64(len(staticTable)) {
+ return staticTable[i-1], true
+ }
+ dents := d.dynTab.ents
+ return dents[len(dents)-(int(i)-len(staticTable))], true
+}
+
+// Decode decodes an entire block.
+//
+// TODO: remove this method and make it incremental later? This is
+// easier for debugging now.
+func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) {
+ var hf []HeaderField
+ saveFunc := d.emit
+ defer func() { d.emit = saveFunc }()
+ d.emit = func(f HeaderField) { hf = append(hf, f) }
+ if _, err := d.Write(p); err != nil {
+ return nil, err
+ }
+ if err := d.Close(); err != nil {
+ return nil, err
+ }
+ return hf, nil
+}
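+
+// Editorial sketch (not part of the upstream source): one-shot
+// decoding of a complete header block held in block:
+//
+//	d := NewDecoder(initialHeaderTableSize, nil)
+//	fields, err := d.DecodeFull(block)
+//	if err != nil {
+//		// handle error
+//	}
+//	for _, f := range fields {
+//		fmt.Printf("%s: %s\n", f.Name, f.Value)
+//	}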
+
+// Close declares that the current header block is complete. It
+// returns a DecodingError if the decoder was left holding a
+// truncated header block, discarding that buffered state.
+func (d *Decoder) Close() error {
+ if d.saveBuf.Len() > 0 {
+ d.saveBuf.Reset()
+ return DecodingError{errors.New("truncated headers")}
+ }
+ return nil
+}
+
+func (d *Decoder) Write(p []byte) (n int, err error) {
+ if len(p) == 0 {
+ // Prevent state machine CPU attacks (making us redo
+ // work up to the point of finding out we don't have
+ // enough data)
+ return
+ }
+ // Only copy the data if we have to. Optimistically assume
+ // that p will contain a complete header block.
+ if d.saveBuf.Len() == 0 {
+ d.buf = p
+ } else {
+ d.saveBuf.Write(p)
+ d.buf = d.saveBuf.Bytes()
+ d.saveBuf.Reset()
+ }
+
+ for len(d.buf) > 0 {
+ err = d.parseHeaderFieldRepr()
+ if err == errNeedMore {
+ // Extra paranoia, making sure saveBuf won't
+ // get too large. All the varint and string
+ // reading code earlier should already catch
+ // overlong things and return ErrStringLength,
+ // but keep this as a last resort.
+ const varIntOverhead = 8 // conservative
+ if d.maxStrLen != 0 && int64(len(d.buf)) > 2*(int64(d.maxStrLen)+varIntOverhead) {
+ return 0, ErrStringLength
+ }
+ d.saveBuf.Write(d.buf)
+ return len(p), nil
+ }
+ if err != nil {
+ break
+ }
+ }
+ return len(p), err
+}
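+
+// Editorial note (not in the upstream source): Write is incremental,
+// so a header block split across frames can be fed piecewise and
+// fields are emitted as soon as they parse completely:
+//
+//	d.Write(block[:7]) // a partial field is buffered in saveBuf
+//	d.Write(block[7:]) // remaining bytes complete it; emit fires
+//	err := d.Close()   // errors if the block was left truncated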
+
+// errNeedMore is an internal sentinel error value that means the
+// buffer is truncated and we need to read more data before we can
+// continue parsing.
+var errNeedMore = errors.New("need more data")
+
+type indexType int
+
+const (
+ indexedTrue indexType = iota
+ indexedFalse
+ indexedNever
+)
+
+func (v indexType) indexed() bool { return v == indexedTrue }
+func (v indexType) sensitive() bool { return v == indexedNever }
+
+// returns errNeedMore if there isn't enough data available.
+// any other error is fatal.
+// consumes d.buf iff it returns nil.
+// precondition: must be called with len(d.buf) > 0
+func (d *Decoder) parseHeaderFieldRepr() error {
+ b := d.buf[0]
+ switch {
+ case b&128 != 0:
+ // Indexed representation.
+ // High bit set?
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.1
+ return d.parseFieldIndexed()
+ case b&192 == 64:
+ // 6.2.1 Literal Header Field with Incremental Indexing
+ // 0b01xxxxxx: top two bits are 01
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.1
+ return d.parseFieldLiteral(6, indexedTrue)
+ case b&240 == 0:
+ // 6.2.2 Literal Header Field without Indexing
+ // 0b0000xxxx: top four bits are 0000
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.2
+ return d.parseFieldLiteral(4, indexedFalse)
+ case b&240 == 16:
+ // 6.2.3 Literal Header Field never Indexed
+ // 0b0001xxxx: top four bits are 0001
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.2.3
+ return d.parseFieldLiteral(4, indexedNever)
+ case b&224 == 32:
+ // 6.3 Dynamic Table Size Update
+ // Top three bits are '001'.
+ // http://http2.github.io/http2-spec/compression.html#rfc.section.6.3
+ return d.parseDynamicTableSizeUpdate()
+ }
+
+ return DecodingError{errors.New("invalid encoding")}
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldIndexed() error {
+ buf := d.buf
+ idx, buf, err := readVarInt(7, buf)
+ if err != nil {
+ return err
+ }
+ hf, ok := d.at(idx)
+ if !ok {
+ return DecodingError{InvalidIndexError(idx)}
+ }
+ d.buf = buf
+ return d.callEmit(HeaderField{Name: hf.Name, Value: hf.Value})
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseFieldLiteral(n uint8, it indexType) error {
+ buf := d.buf
+ nameIdx, buf, err := readVarInt(n, buf)
+ if err != nil {
+ return err
+ }
+
+ var hf HeaderField
+ wantStr := d.emitEnabled || it.indexed()
+ if nameIdx > 0 {
+ ihf, ok := d.at(nameIdx)
+ if !ok {
+ return DecodingError{InvalidIndexError(nameIdx)}
+ }
+ hf.Name = ihf.Name
+ } else {
+ hf.Name, buf, err = d.readString(buf, wantStr)
+ if err != nil {
+ return err
+ }
+ }
+ hf.Value, buf, err = d.readString(buf, wantStr)
+ if err != nil {
+ return err
+ }
+ d.buf = buf
+ if it.indexed() {
+ d.dynTab.add(hf)
+ }
+ hf.Sensitive = it.sensitive()
+ return d.callEmit(hf)
+}
+
+func (d *Decoder) callEmit(hf HeaderField) error {
+ if d.maxStrLen != 0 {
+ if len(hf.Name) > d.maxStrLen || len(hf.Value) > d.maxStrLen {
+ return ErrStringLength
+ }
+ }
+ if d.emitEnabled {
+ d.emit(hf)
+ }
+ return nil
+}
+
+// (same invariants and behavior as parseHeaderFieldRepr)
+func (d *Decoder) parseDynamicTableSizeUpdate() error {
+ buf := d.buf
+ size, buf, err := readVarInt(5, buf)
+ if err != nil {
+ return err
+ }
+ if size > uint64(d.dynTab.allowedMaxSize) {
+ return DecodingError{errors.New("dynamic table size update too large")}
+ }
+ d.dynTab.setMaxSize(uint32(size))
+ d.buf = buf
+ return nil
+}
+
+var errVarintOverflow = DecodingError{errors.New("varint integer overflow")}
+
+// readVarInt reads an unsigned variable length integer off the
+// beginning of p. n is the parameter as described in
+// http://http2.github.io/http2-spec/compression.html#rfc.section.5.1.
+//
+// n must always be between 1 and 8.
+//
+// The returned remain buffer is either a smaller suffix of p, or err != nil.
+// The error is errNeedMore if p doesn't contain a complete integer.
+func readVarInt(n byte, p []byte) (i uint64, remain []byte, err error) {
+ if n < 1 || n > 8 {
+ panic("bad n")
+ }
+ if len(p) == 0 {
+ return 0, p, errNeedMore
+ }
+ i = uint64(p[0])
+ if n < 8 {
+ i &= (1 << uint64(n)) - 1
+ }
+ if i < (1<<uint64(n))-1 {
+ return i, p[1:], nil
+ }
+
+ origP := p
+ p = p[1:]
+ var m uint64
+ for len(p) > 0 {
+ b := p[0]
+ p = p[1:]
+ i += uint64(b&127) << m
+ if b&128 == 0 {
+ return i, p, nil
+ }
+ m += 7
+ if m >= 63 { // TODO: proper overflow check. making this up.
+ return 0, origP, errVarintOverflow
+ }
+ }
+ return 0, origP, errNeedMore
+}
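+
+// Editorial worked example (not in the upstream source), the inverse
+// of the RFC 7541 C.1.2 encoding above: with n=5, the prefix byte 0x1f
+// is the maximum (31), so continuation bytes follow; 0x9a contributes
+// 26<<0, and 0x0a (high bit clear) ends the integer with 10<<7:
+//
+//	readVarInt(5, []byte{0x1f, 0x9a, 0x0a}) // i = 31 + 26 + 1280 = 1337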
+
+// readString decodes an hpack string from p.
+//
+// wantStr is whether s will be used. If false, decompression and
+// []byte->string garbage are skipped, since s will be ignored
+// anyway. This does mean that Huffman decoding errors for non-indexed
+// strings past the MAX_HEADER_LIST_SIZE are ignored, but the server
+// is returning an error anyway, and because they're not indexed, the error
+// won't affect the decoding state.
+func (d *Decoder) readString(p []byte, wantStr bool) (s string, remain []byte, err error) {
+ if len(p) == 0 {
+ return "", p, errNeedMore
+ }
+ isHuff := p[0]&128 != 0
+ strLen, p, err := readVarInt(7, p)
+ if err != nil {
+ return "", p, err
+ }
+ if d.maxStrLen != 0 && strLen > uint64(d.maxStrLen) {
+ return "", nil, ErrStringLength
+ }
+ if uint64(len(p)) < strLen {
+ return "", p, errNeedMore
+ }
+ if !isHuff {
+ if wantStr {
+ s = string(p[:strLen])
+ }
+ return s, p[strLen:], nil
+ }
+
+ if wantStr {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset() // don't trust others
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, d.maxStrLen, p[:strLen]); err != nil {
+ buf.Reset()
+ return "", nil, err
+ }
+ s = buf.String()
+ buf.Reset() // be nice to GC
+ }
+ return s, p[strLen:], nil
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go
new file mode 100644
index 0000000..8850e39
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/huffman.go
@@ -0,0 +1,212 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "sync"
+)
+
+var bufPool = sync.Pool{
+ New: func() interface{} { return new(bytes.Buffer) },
+}
+
+// HuffmanDecode decodes the string in v and writes the expanded
+// result to w, returning the number of bytes written to w and the
+// Write call's return value. At most one Write call is made.
+func HuffmanDecode(w io.Writer, v []byte) (int, error) {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, 0, v); err != nil {
+ return 0, err
+ }
+ return w.Write(buf.Bytes())
+}
+
+// HuffmanDecodeToString decodes the string in v.
+func HuffmanDecodeToString(v []byte) (string, error) {
+ buf := bufPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer bufPool.Put(buf)
+ if err := huffmanDecode(buf, 0, v); err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
+// ErrInvalidHuffman is returned for errors found decoding
+// Huffman-encoded strings.
+var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
+
+// huffmanDecode decodes v to buf.
+// If maxLen is greater than 0, attempts to write more to buf than
+// maxLen bytes will return ErrStringLength.
+func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
+ n := rootHuffmanNode
+ // cur is the bit buffer that has not been fed into n.
+ // cbits is the number of low order bits in cur that are valid.
+ // sbits is the number of bits of the symbol prefix being decoded.
+ cur, cbits, sbits := uint(0), uint8(0), uint8(0)
+ for _, b := range v {
+ cur = cur<<8 | uint(b)
+ cbits += 8
+ sbits += 8
+ for cbits >= 8 {
+ idx := byte(cur >> (cbits - 8))
+ n = n.children[idx]
+ if n == nil {
+ return ErrInvalidHuffman
+ }
+ if n.children == nil {
+ if maxLen != 0 && buf.Len() == maxLen {
+ return ErrStringLength
+ }
+ buf.WriteByte(n.sym)
+ cbits -= n.codeLen
+ n = rootHuffmanNode
+ sbits = cbits
+ } else {
+ cbits -= 8
+ }
+ }
+ }
+ for cbits > 0 {
+ n = n.children[byte(cur<<(8-cbits))]
+ if n == nil {
+ return ErrInvalidHuffman
+ }
+ if n.children != nil || n.codeLen > cbits {
+ break
+ }
+ if maxLen != 0 && buf.Len() == maxLen {
+ return ErrStringLength
+ }
+ buf.WriteByte(n.sym)
+ cbits -= n.codeLen
+ n = rootHuffmanNode
+ sbits = cbits
+ }
+ if sbits > 7 {
+ // Either there was an incomplete symbol, or overlong padding.
+ // Both are decoding errors per RFC 7541 section 5.2.
+ return ErrInvalidHuffman
+ }
+ if mask := uint(1<<cbits - 1); cur&mask != mask {
+ // Trailing bits must be a prefix of EOS per RFC 7541 section 5.2.
+ return ErrInvalidHuffman
+ }
+
+ return nil
+}
+
+type node struct {
+ // children is non-nil for internal nodes
+ children []*node
+
+ // The following are only valid if children is nil:
+ codeLen uint8 // number of bits that led to the output of sym
+ sym byte // output symbol
+}
+
+func newInternalNode() *node {
+ return &node{children: make([]*node, 256)}
+}
+
+var rootHuffmanNode = newInternalNode()
+
+func init() {
+ if len(huffmanCodes) != 256 {
+ panic("unexpected size")
+ }
+ for i, code := range huffmanCodes {
+ addDecoderNode(byte(i), code, huffmanCodeLen[i])
+ }
+}
+
+func addDecoderNode(sym byte, code uint32, codeLen uint8) {
+ cur := rootHuffmanNode
+ for codeLen > 8 {
+ codeLen -= 8
+ i := uint8(code >> codeLen)
+ if cur.children[i] == nil {
+ cur.children[i] = newInternalNode()
+ }
+ cur = cur.children[i]
+ }
+ shift := 8 - codeLen
+ start, end := int(uint8(code<<shift)), int(1<<shift)
+ for i := start; i < start+end; i++ {
+ cur.children[i] = &node{sym: sym, codeLen: codeLen}
+ }
+}
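+
+// Editorial worked example (not in the upstream source): each level of
+// the tree consumes 8 bits, so a code shorter than 8 bits fills a
+// range of child slots. '0' has code 0b00000 (5 bits); shift = 3, so
+// it occupies rootHuffmanNode.children[0x00] through children[0x07],
+// and any byte beginning with those 5 bits decodes to '0' with
+// codeLen 5.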
+
+// AppendHuffmanString appends s, as encoded in Huffman codes, to dst
+// and returns the extended buffer.
+func AppendHuffmanString(dst []byte, s string) []byte {
+ rembits := uint8(8)
+
+ for i := 0; i < len(s); i++ {
+ if rembits == 8 {
+ dst = append(dst, 0)
+ }
+ dst, rembits = appendByteToHuffmanCode(dst, rembits, s[i])
+ }
+
+ if rembits < 8 {
+ // special EOS symbol
+ code := uint32(0x3fffffff)
+ nbits := uint8(30)
+
+ t := uint8(code >> (nbits - rembits))
+ dst[len(dst)-1] |= t
+ }
+
+ return dst
+}
+
+// HuffmanEncodeLength returns the number of bytes required to encode
+// s in Huffman codes. The result is rounded up to a byte boundary.
+func HuffmanEncodeLength(s string) uint64 {
+ n := uint64(0)
+ for i := 0; i < len(s); i++ {
+ n += uint64(huffmanCodeLen[s[i]])
+ }
+ return (n + 7) / 8
+}
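+
+// Editorial worked example (not in the upstream source): for
+// "no-cache" (RFC 7541 Appendix C.6.2) the per-byte code lengths sum
+// to 43 bits, so HuffmanEncodeLength returns (43+7)/8 = 6 bytes.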
+
+// appendByteToHuffmanCode appends Huffman code for c to dst and
+// returns the extended buffer and the remaining bits in the last
+// element. The appending is not byte-aligned and the number of bits
+// remaining in the last element of dst is given in rembits.
+func appendByteToHuffmanCode(dst []byte, rembits uint8, c byte) ([]byte, uint8) {
+ code := huffmanCodes[c]
+ nbits := huffmanCodeLen[c]
+
+ for {
+ if rembits > nbits {
+ t := uint8(code << (rembits - nbits))
+ dst[len(dst)-1] |= t
+ rembits -= nbits
+ break
+ }
+
+ t := uint8(code >> (nbits - rembits))
+ dst[len(dst)-1] |= t
+
+ nbits -= rembits
+ rembits = 8
+
+ if nbits == 0 {
+ break
+ }
+
+ dst = append(dst, 0)
+ }
+
+ return dst, rembits
+}
diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go
new file mode 100644
index 0000000..b9283a0
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/hpack/tables.go
@@ -0,0 +1,352 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package hpack
+
+func pair(name, value string) HeaderField {
+ return HeaderField{Name: name, Value: value}
+}
+
+// http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B
+var staticTable = [...]HeaderField{
+ pair(":authority", ""), // index 1 (1-based)
+ pair(":method", "GET"),
+ pair(":method", "POST"),
+ pair(":path", "/"),
+ pair(":path", "/index.html"),
+ pair(":scheme", "http"),
+ pair(":scheme", "https"),
+ pair(":status", "200"),
+ pair(":status", "204"),
+ pair(":status", "206"),
+ pair(":status", "304"),
+ pair(":status", "400"),
+ pair(":status", "404"),
+ pair(":status", "500"),
+ pair("accept-charset", ""),
+ pair("accept-encoding", "gzip, deflate"),
+ pair("accept-language", ""),
+ pair("accept-ranges", ""),
+ pair("accept", ""),
+ pair("access-control-allow-origin", ""),
+ pair("age", ""),
+ pair("allow", ""),
+ pair("authorization", ""),
+ pair("cache-control", ""),
+ pair("content-disposition", ""),
+ pair("content-encoding", ""),
+ pair("content-language", ""),
+ pair("content-length", ""),
+ pair("content-location", ""),
+ pair("content-range", ""),
+ pair("content-type", ""),
+ pair("cookie", ""),
+ pair("date", ""),
+ pair("etag", ""),
+ pair("expect", ""),
+ pair("expires", ""),
+ pair("from", ""),
+ pair("host", ""),
+ pair("if-match", ""),
+ pair("if-modified-since", ""),
+ pair("if-none-match", ""),
+ pair("if-range", ""),
+ pair("if-unmodified-since", ""),
+ pair("last-modified", ""),
+ pair("link", ""),
+ pair("location", ""),
+ pair("max-forwards", ""),
+ pair("proxy-authenticate", ""),
+ pair("proxy-authorization", ""),
+ pair("range", ""),
+ pair("referer", ""),
+ pair("refresh", ""),
+ pair("retry-after", ""),
+ pair("server", ""),
+ pair("set-cookie", ""),
+ pair("strict-transport-security", ""),
+ pair("transfer-encoding", ""),
+ pair("user-agent", ""),
+ pair("vary", ""),
+ pair("via", ""),
+ pair("www-authenticate", ""),
+}
+
+var huffmanCodes = [256]uint32{
+ 0x1ff8,
+ 0x7fffd8,
+ 0xfffffe2,
+ 0xfffffe3,
+ 0xfffffe4,
+ 0xfffffe5,
+ 0xfffffe6,
+ 0xfffffe7,
+ 0xfffffe8,
+ 0xffffea,
+ 0x3ffffffc,
+ 0xfffffe9,
+ 0xfffffea,
+ 0x3ffffffd,
+ 0xfffffeb,
+ 0xfffffec,
+ 0xfffffed,
+ 0xfffffee,
+ 0xfffffef,
+ 0xffffff0,
+ 0xffffff1,
+ 0xffffff2,
+ 0x3ffffffe,
+ 0xffffff3,
+ 0xffffff4,
+ 0xffffff5,
+ 0xffffff6,
+ 0xffffff7,
+ 0xffffff8,
+ 0xffffff9,
+ 0xffffffa,
+ 0xffffffb,
+ 0x14,
+ 0x3f8,
+ 0x3f9,
+ 0xffa,
+ 0x1ff9,
+ 0x15,
+ 0xf8,
+ 0x7fa,
+ 0x3fa,
+ 0x3fb,
+ 0xf9,
+ 0x7fb,
+ 0xfa,
+ 0x16,
+ 0x17,
+ 0x18,
+ 0x0,
+ 0x1,
+ 0x2,
+ 0x19,
+ 0x1a,
+ 0x1b,
+ 0x1c,
+ 0x1d,
+ 0x1e,
+ 0x1f,
+ 0x5c,
+ 0xfb,
+ 0x7ffc,
+ 0x20,
+ 0xffb,
+ 0x3fc,
+ 0x1ffa,
+ 0x21,
+ 0x5d,
+ 0x5e,
+ 0x5f,
+ 0x60,
+ 0x61,
+ 0x62,
+ 0x63,
+ 0x64,
+ 0x65,
+ 0x66,
+ 0x67,
+ 0x68,
+ 0x69,
+ 0x6a,
+ 0x6b,
+ 0x6c,
+ 0x6d,
+ 0x6e,
+ 0x6f,
+ 0x70,
+ 0x71,
+ 0x72,
+ 0xfc,
+ 0x73,
+ 0xfd,
+ 0x1ffb,
+ 0x7fff0,
+ 0x1ffc,
+ 0x3ffc,
+ 0x22,
+ 0x7ffd,
+ 0x3,
+ 0x23,
+ 0x4,
+ 0x24,
+ 0x5,
+ 0x25,
+ 0x26,
+ 0x27,
+ 0x6,
+ 0x74,
+ 0x75,
+ 0x28,
+ 0x29,
+ 0x2a,
+ 0x7,
+ 0x2b,
+ 0x76,
+ 0x2c,
+ 0x8,
+ 0x9,
+ 0x2d,
+ 0x77,
+ 0x78,
+ 0x79,
+ 0x7a,
+ 0x7b,
+ 0x7ffe,
+ 0x7fc,
+ 0x3ffd,
+ 0x1ffd,
+ 0xffffffc,
+ 0xfffe6,
+ 0x3fffd2,
+ 0xfffe7,
+ 0xfffe8,
+ 0x3fffd3,
+ 0x3fffd4,
+ 0x3fffd5,
+ 0x7fffd9,
+ 0x3fffd6,
+ 0x7fffda,
+ 0x7fffdb,
+ 0x7fffdc,
+ 0x7fffdd,
+ 0x7fffde,
+ 0xffffeb,
+ 0x7fffdf,
+ 0xffffec,
+ 0xffffed,
+ 0x3fffd7,
+ 0x7fffe0,
+ 0xffffee,
+ 0x7fffe1,
+ 0x7fffe2,
+ 0x7fffe3,
+ 0x7fffe4,
+ 0x1fffdc,
+ 0x3fffd8,
+ 0x7fffe5,
+ 0x3fffd9,
+ 0x7fffe6,
+ 0x7fffe7,
+ 0xffffef,
+ 0x3fffda,
+ 0x1fffdd,
+ 0xfffe9,
+ 0x3fffdb,
+ 0x3fffdc,
+ 0x7fffe8,
+ 0x7fffe9,
+ 0x1fffde,
+ 0x7fffea,
+ 0x3fffdd,
+ 0x3fffde,
+ 0xfffff0,
+ 0x1fffdf,
+ 0x3fffdf,
+ 0x7fffeb,
+ 0x7fffec,
+ 0x1fffe0,
+ 0x1fffe1,
+ 0x3fffe0,
+ 0x1fffe2,
+ 0x7fffed,
+ 0x3fffe1,
+ 0x7fffee,
+ 0x7fffef,
+ 0xfffea,
+ 0x3fffe2,
+ 0x3fffe3,
+ 0x3fffe4,
+ 0x7ffff0,
+ 0x3fffe5,
+ 0x3fffe6,
+ 0x7ffff1,
+ 0x3ffffe0,
+ 0x3ffffe1,
+ 0xfffeb,
+ 0x7fff1,
+ 0x3fffe7,
+ 0x7ffff2,
+ 0x3fffe8,
+ 0x1ffffec,
+ 0x3ffffe2,
+ 0x3ffffe3,
+ 0x3ffffe4,
+ 0x7ffffde,
+ 0x7ffffdf,
+ 0x3ffffe5,
+ 0xfffff1,
+ 0x1ffffed,
+ 0x7fff2,
+ 0x1fffe3,
+ 0x3ffffe6,
+ 0x7ffffe0,
+ 0x7ffffe1,
+ 0x3ffffe7,
+ 0x7ffffe2,
+ 0xfffff2,
+ 0x1fffe4,
+ 0x1fffe5,
+ 0x3ffffe8,
+ 0x3ffffe9,
+ 0xffffffd,
+ 0x7ffffe3,
+ 0x7ffffe4,
+ 0x7ffffe5,
+ 0xfffec,
+ 0xfffff3,
+ 0xfffed,
+ 0x1fffe6,
+ 0x3fffe9,
+ 0x1fffe7,
+ 0x1fffe8,
+ 0x7ffff3,
+ 0x3fffea,
+ 0x3fffeb,
+ 0x1ffffee,
+ 0x1ffffef,
+ 0xfffff4,
+ 0xfffff5,
+ 0x3ffffea,
+ 0x7ffff4,
+ 0x3ffffeb,
+ 0x7ffffe6,
+ 0x3ffffec,
+ 0x3ffffed,
+ 0x7ffffe7,
+ 0x7ffffe8,
+ 0x7ffffe9,
+ 0x7ffffea,
+ 0x7ffffeb,
+ 0xffffffe,
+ 0x7ffffec,
+ 0x7ffffed,
+ 0x7ffffee,
+ 0x7ffffef,
+ 0x7fffff0,
+ 0x3ffffee,
+}
+
+var huffmanCodeLen = [256]uint8{
+ 13, 23, 28, 28, 28, 28, 28, 28, 28, 24, 30, 28, 28, 30, 28, 28,
+ 28, 28, 28, 28, 28, 28, 30, 28, 28, 28, 28, 28, 28, 28, 28, 28,
+ 6, 10, 10, 12, 13, 6, 8, 11, 10, 10, 8, 11, 8, 6, 6, 6,
+ 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 7, 8, 15, 6, 12, 10,
+ 13, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+ 7, 7, 7, 7, 7, 7, 7, 7, 8, 7, 8, 13, 19, 13, 14, 6,
+ 15, 5, 6, 5, 6, 5, 6, 6, 6, 5, 7, 7, 6, 6, 6, 5,
+ 6, 7, 6, 5, 5, 6, 7, 7, 7, 7, 7, 15, 11, 14, 13, 28,
+ 20, 22, 20, 20, 22, 22, 22, 23, 22, 23, 23, 23, 23, 23, 24, 23,
+ 24, 24, 22, 23, 24, 23, 23, 23, 23, 21, 22, 23, 22, 23, 23, 24,
+ 22, 21, 20, 22, 22, 23, 23, 21, 23, 22, 22, 24, 21, 22, 23, 23,
+ 21, 21, 22, 21, 23, 22, 23, 23, 20, 22, 22, 22, 23, 22, 22, 23,
+ 26, 26, 20, 19, 22, 23, 22, 25, 26, 26, 26, 27, 27, 26, 24, 25,
+ 19, 21, 26, 27, 27, 26, 27, 24, 21, 21, 26, 26, 28, 27, 27, 27,
+ 20, 24, 20, 21, 22, 21, 21, 23, 22, 22, 25, 25, 24, 24, 26, 23,
+ 26, 27, 26, 26, 27, 27, 27, 27, 27, 28, 27, 27, 27, 27, 27, 26,
+}
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
new file mode 100644
index 0000000..0173aed
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -0,0 +1,351 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package http2 implements the HTTP/2 protocol.
+//
+// This package is low-level and intended to be used directly by very
+// few people. Most users will use it indirectly through the automatic
+// use by the net/http package (from Go 1.6 and later).
+// For use in earlier Go versions see ConfigureServer. (Transport support
+// requires Go 1.6 or later.)
+//
+// See https://http2.github.io/ for more information on HTTP/2.
+//
+// See https://http2.golang.org/ for a test server running this code.
+package http2
+
+import (
+ "bufio"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "os"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+
+ "golang.org/x/net/lex/httplex"
+)
+
+var (
+ VerboseLogs bool
+ logFrameWrites bool
+ logFrameReads bool
+)
+
+func init() {
+ e := os.Getenv("GODEBUG")
+ if strings.Contains(e, "http2debug=1") {
+ VerboseLogs = true
+ }
+ if strings.Contains(e, "http2debug=2") {
+ VerboseLogs = true
+ logFrameWrites = true
+ logFrameReads = true
+ }
+}
+
+const (
+ // ClientPreface is the string that must be sent by new
+ // connections from clients.
+ ClientPreface = "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n"
+
+ // SETTINGS_MAX_FRAME_SIZE default
+ // http://http2.github.io/http2-spec/#rfc.section.6.5.2
+ initialMaxFrameSize = 16384
+
+ // NextProtoTLS is the NPN/ALPN protocol negotiated during
+ // HTTP/2's TLS setup.
+ NextProtoTLS = "h2"
+
+ // http://http2.github.io/http2-spec/#SettingValues
+ initialHeaderTableSize = 4096
+
+ initialWindowSize = 65535 // 6.9.2 Initial Flow Control Window Size
+
+ defaultMaxReadFrameSize = 1 << 20
+)
+
+var (
+ clientPreface = []byte(ClientPreface)
+)
+
+type streamState int
+
+const (
+ stateIdle streamState = iota
+ stateOpen
+ stateHalfClosedLocal
+ stateHalfClosedRemote
+ stateResvLocal
+ stateResvRemote
+ stateClosed
+)
+
+var stateName = [...]string{
+ stateIdle: "Idle",
+ stateOpen: "Open",
+ stateHalfClosedLocal: "HalfClosedLocal",
+ stateHalfClosedRemote: "HalfClosedRemote",
+ stateResvLocal: "ResvLocal",
+ stateResvRemote: "ResvRemote",
+ stateClosed: "Closed",
+}
+
+func (st streamState) String() string {
+ return stateName[st]
+}
+
+// Setting is a setting parameter: which setting it is, and its value.
+type Setting struct {
+ // ID is which setting is being set.
+ // See http://http2.github.io/http2-spec/#SettingValues
+ ID SettingID
+
+ // Val is the value.
+ Val uint32
+}
+
+func (s Setting) String() string {
+ return fmt.Sprintf("[%v = %d]", s.ID, s.Val)
+}
+
+// Valid reports whether the setting is valid.
+func (s Setting) Valid() error {
+ // Limits and error codes from 6.5.2 Defined SETTINGS Parameters
+ switch s.ID {
+ case SettingEnablePush:
+ if s.Val != 1 && s.Val != 0 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ case SettingInitialWindowSize:
+ if s.Val > 1<<31-1 {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ case SettingMaxFrameSize:
+ if s.Val < 16384 || s.Val > 1<<24-1 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ }
+ return nil
+}
+
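+// A minimal sketch of Valid in use (illustrative only, assuming the
+// setting IDs declared below): a SETTINGS_MAX_FRAME_SIZE value above
+// 1<<24-1 is rejected as a protocol-level connection error.
+//
+//    s := Setting{ID: SettingMaxFrameSize, Val: 1 << 30}
+//    err := s.Valid() // ConnectionError(ErrCodeProtocol)
+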
+// A SettingID is an HTTP/2 setting as defined in
+// http://http2.github.io/http2-spec/#iana-settings
+type SettingID uint16
+
+const (
+ SettingHeaderTableSize SettingID = 0x1
+ SettingEnablePush SettingID = 0x2
+ SettingMaxConcurrentStreams SettingID = 0x3
+ SettingInitialWindowSize SettingID = 0x4
+ SettingMaxFrameSize SettingID = 0x5
+ SettingMaxHeaderListSize SettingID = 0x6
+)
+
+var settingName = map[SettingID]string{
+ SettingHeaderTableSize: "HEADER_TABLE_SIZE",
+ SettingEnablePush: "ENABLE_PUSH",
+ SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
+ SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
+ SettingMaxFrameSize: "MAX_FRAME_SIZE",
+ SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
+}
+
+func (s SettingID) String() string {
+ if v, ok := settingName[s]; ok {
+ return v
+ }
+ return fmt.Sprintf("UNKNOWN_SETTING_%d", uint16(s))
+}
+
+var (
+ errInvalidHeaderFieldName = errors.New("http2: invalid header field name")
+ errInvalidHeaderFieldValue = errors.New("http2: invalid header field value")
+)
+
+// validWireHeaderFieldName reports whether v is a valid header field
+// name (key). See httplex.ValidHeaderName for the base rules.
+//
+// Further, http2 says:
+// "Just as in HTTP/1.x, header field names are strings of ASCII
+// characters that are compared in a case-insensitive
+// fashion. However, header field names MUST be converted to
+// lowercase prior to their encoding in HTTP/2. "
+func validWireHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ if !httplex.IsTokenRune(r) {
+ return false
+ }
+ if 'A' <= r && r <= 'Z' {
+ return false
+ }
+ }
+ return true
+}
+
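+// A short sketch of the rules above: lowercase token names are valid
+// on the wire, while empty or uppercase names are not.
+//
+//    validWireHeaderFieldName("content-type") // true
+//    validWireHeaderFieldName("Content-Type") // false: uppercase
+//    validWireHeaderFieldName("")             // false: empty
+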
+var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n)
+
+func init() {
+ for i := 100; i <= 999; i++ {
+ if v := http.StatusText(i); v != "" {
+ httpCodeStringCommon[i] = strconv.Itoa(i)
+ }
+ }
+}
+
+func httpCodeString(code int) string {
+ if s, ok := httpCodeStringCommon[code]; ok {
+ return s
+ }
+ return strconv.Itoa(code)
+}
+
+// from pkg io
+type stringWriter interface {
+ WriteString(s string) (n int, err error)
+}
+
+// A gate lets two goroutines coordinate their activities.
+type gate chan struct{}
+
+func (g gate) Done() { g <- struct{}{} }
+func (g gate) Wait() { <-g }
+
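+// A minimal sketch of the gate handshake: one goroutine signals Done,
+// the other blocks in Wait until the signal arrives.
+//
+//    g := make(gate)
+//    go func() {
+//        // ... hand the shared value over ...
+//        g.Done()
+//    }()
+//    g.Wait() // returns once the other goroutine calls Done
+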
+// A closeWaiter is like a sync.WaitGroup but only goes 1 to 0 (open to closed).
+type closeWaiter chan struct{}
+
+// Init makes a closeWaiter usable.
+// It exists so that a closeWaiter value can be placed inside a
+// larger struct and have the Mutex and Cond's memory in the same
+// allocation.
+func (cw *closeWaiter) Init() {
+ *cw = make(chan struct{})
+}
+
+// Close marks the closeWaiter as closed and unblocks any waiters.
+func (cw closeWaiter) Close() {
+ close(cw)
+}
+
+// Wait waits for the closeWaiter to become closed.
+func (cw closeWaiter) Wait() {
+ <-cw
+}
+
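+// A sketch of the intended lifecycle, assuming a cw embedded in some
+// larger struct: Init once, Close once, and any number of Wait calls
+// unblock after Close.
+//
+//    var cw closeWaiter
+//    cw.Init()
+//    go func() { cw.Wait() }() // unblocks after Close below
+//    cw.Close()
+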
+// bufferedWriter is a buffered writer that writes to w.
+// Its buffered writer is lazily allocated as needed, to minimize
+// idle memory usage with many connections.
+type bufferedWriter struct {
+ w io.Writer // immutable
+ bw *bufio.Writer // non-nil when data is buffered
+}
+
+func newBufferedWriter(w io.Writer) *bufferedWriter {
+ return &bufferedWriter{w: w}
+}
+
+var bufWriterPool = sync.Pool{
+ New: func() interface{} {
+ // TODO: pick something better? this is a bit under
+ // (3 x typical 1500 byte MTU) at least.
+ return bufio.NewWriterSize(nil, 4<<10)
+ },
+}
+
+func (w *bufferedWriter) Write(p []byte) (n int, err error) {
+ if w.bw == nil {
+ bw := bufWriterPool.Get().(*bufio.Writer)
+ bw.Reset(w.w)
+ w.bw = bw
+ }
+ return w.bw.Write(p)
+}
+
+func (w *bufferedWriter) Flush() error {
+ bw := w.bw
+ if bw == nil {
+ return nil
+ }
+ err := bw.Flush()
+ bw.Reset(nil)
+ bufWriterPool.Put(bw)
+ w.bw = nil
+ return err
+}
+
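+// A sketch of the pooling pattern above, where conn and frameBytes are
+// placeholders: the bufio.Writer is borrowed from bufWriterPool on the
+// first Write and returned on Flush, so an idle connection pins no 4KB
+// buffer.
+//
+//    bw := newBufferedWriter(conn)
+//    bw.Write(frameBytes) // borrows a pooled bufio.Writer
+//    bw.Flush()           // flushes and returns it to the pool
+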
+func mustUint31(v int32) uint32 {
+ if v < 0 || v > 2147483647 {
+ panic("out of range")
+ }
+ return uint32(v)
+}
+
+// bodyAllowedForStatus reports whether a given response status code
+// permits a body. See RFC 2616, section 4.4.
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
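+// Per the rules above, informational, 204, and 304 responses carry no
+// body:
+//
+//    bodyAllowedForStatus(101) // false
+//    bodyAllowedForStatus(200) // true
+//    bodyAllowedForStatus(204) // false
+//    bodyAllowedForStatus(304) // false
+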
+type httpError struct {
+ msg string
+ timeout bool
+}
+
+func (e *httpError) Error() string { return e.msg }
+func (e *httpError) Timeout() bool { return e.timeout }
+func (e *httpError) Temporary() bool { return true }
+
+var errTimeout error = &httpError{msg: "http2: timeout awaiting response headers", timeout: true}
+
+type connectionStater interface {
+ ConnectionState() tls.ConnectionState
+}
+
+var sorterPool = sync.Pool{New: func() interface{} { return new(sorter) }}
+
+type sorter struct {
+ v []string // owned by sorter
+}
+
+func (s *sorter) Len() int { return len(s.v) }
+func (s *sorter) Swap(i, j int) { s.v[i], s.v[j] = s.v[j], s.v[i] }
+func (s *sorter) Less(i, j int) bool { return s.v[i] < s.v[j] }
+
+// Keys returns the sorted keys of h.
+//
+// The returned slice is only valid until s is used again or returned to
+// its pool.
+func (s *sorter) Keys(h http.Header) []string {
+ keys := s.v[:0]
+ for k := range h {
+ keys = append(keys, k)
+ }
+ s.v = keys
+ sort.Sort(s)
+ return keys
+}
+
+func (s *sorter) SortStrings(ss []string) {
+ // Our sorter works on s.v, which the sorter owns, so
+ // stash it away while we sort the user's buffer.
+ save := s.v
+ s.v = ss
+ sort.Sort(s)
+ s.v = save
+}
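+
+// A sketch of a pooled sorter visiting an http.Header (hdr is a
+// placeholder); the keys slice must not be retained past Put.
+//
+//    s := sorterPool.Get().(*sorter)
+//    for _, k := range s.Keys(hdr) {
+//        // visit headers in sorted order
+//    }
+//    sorterPool.Put(s) // keys are invalid from here on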
diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go
new file mode 100644
index 0000000..51a7f19
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go16.go
@@ -0,0 +1,20 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.6
+
+package http2
+
+import (
+ "net/http"
+ "time"
+)
+
+func configureTransport(t1 *http.Transport) (*Transport, error) {
+ return nil, errTransportVersion
+}
+
+func transportExpectContinueTimeout(t1 *http.Transport) time.Duration {
+ return 0
+}
diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go
new file mode 100644
index 0000000..28df0c1
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/not_go17.go
@@ -0,0 +1,51 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package http2
+
+import (
+ "net"
+ "net/http"
+)
+
+type contextContext interface{}
+
+type fakeContext struct{}
+
+func (fakeContext) Done() <-chan struct{} { return nil }
+func (fakeContext) Err() error { panic("should not be called") }
+
+func reqContext(r *http.Request) fakeContext {
+ return fakeContext{}
+}
+
+func setResponseUncompressed(res *http.Response) {
+ // Nothing.
+}
+
+type clientTrace struct{}
+
+func requestTrace(*http.Request) *clientTrace { return nil }
+func traceGotConn(*http.Request, *ClientConn) {}
+func traceFirstResponseByte(*clientTrace) {}
+func traceWroteHeaders(*clientTrace) {}
+func traceWroteRequest(*clientTrace, error) {}
+func traceGot100Continue(trace *clientTrace) {}
+func traceWait100Continue(trace *clientTrace) {}
+
+func nop() {}
+
+func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
+ return nil, nop
+}
+
+func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) {
+ return ctx, nop
+}
+
+func requestWithContext(req *http.Request, ctx contextContext) *http.Request {
+ return req
+}
diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go
new file mode 100644
index 0000000..69446e7
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/pipe.go
@@ -0,0 +1,147 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like
+// io.Pipe except there are no PipeReader/PipeWriter halves, and the
+// underlying buffer is an interface. (io.Pipe is always unbuffered)
+type pipe struct {
+ mu sync.Mutex
+ c sync.Cond // c.L lazily initialized to &p.mu
+ b pipeBuffer
+ err error // read error once empty. non-nil means closed.
+ breakErr error // immediate read error (caller doesn't see rest of b)
+ donec chan struct{} // closed on error
+ readFn func() // optional code to run in Read before error
+}
+
+type pipeBuffer interface {
+ Len() int
+ io.Writer
+ io.Reader
+}
+
+// Read waits until data is available and copies bytes
+// from the buffer into p.
+func (p *pipe) Read(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ for {
+ if p.breakErr != nil {
+ return 0, p.breakErr
+ }
+ if p.b.Len() > 0 {
+ return p.b.Read(d)
+ }
+ if p.err != nil {
+ if p.readFn != nil {
+ p.readFn() // e.g. copy trailers
+ p.readFn = nil // not sticky like p.err
+ }
+ return 0, p.err
+ }
+ p.c.Wait()
+ }
+}
+
+var errClosedPipeWrite = errors.New("write on closed buffer")
+
+// Write copies bytes from p into the buffer and wakes a reader.
+// It is an error to write more data than the buffer can hold.
+func (p *pipe) Write(d []byte) (n int, err error) {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if p.err != nil {
+ return 0, errClosedPipeWrite
+ }
+ return p.b.Write(d)
+}
+
+// CloseWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err after all data has been
+// read.
+//
+// The error must be non-nil.
+func (p *pipe) CloseWithError(err error) { p.closeWithError(&p.err, err, nil) }
+
+// BreakWithError causes the next Read (waking up a current blocked
+// Read if needed) to return the provided err immediately, without
+// waiting for unread data.
+func (p *pipe) BreakWithError(err error) { p.closeWithError(&p.breakErr, err, nil) }
+
+// closeWithErrorAndCode is like CloseWithError but also sets some code to run
+// in the caller's goroutine before returning the error.
+func (p *pipe) closeWithErrorAndCode(err error, fn func()) { p.closeWithError(&p.err, err, fn) }
+
+func (p *pipe) closeWithError(dst *error, err error, fn func()) {
+ if err == nil {
+ panic("err must be non-nil")
+ }
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.c.L == nil {
+ p.c.L = &p.mu
+ }
+ defer p.c.Signal()
+ if *dst != nil {
+ // Already been done.
+ return
+ }
+ p.readFn = fn
+ *dst = err
+ p.closeDoneLocked()
+}
+
+// requires p.mu be held.
+func (p *pipe) closeDoneLocked() {
+ if p.donec == nil {
+ return
+ }
+ // Close if unclosed. This isn't racy since we always
+ // hold p.mu while closing.
+ select {
+ case <-p.donec:
+ default:
+ close(p.donec)
+ }
+}
+
+// Err returns the error (if any) first set by BreakWithError or CloseWithError.
+func (p *pipe) Err() error {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.breakErr != nil {
+ return p.breakErr
+ }
+ return p.err
+}
+
+// Done returns a channel which is closed if and when this pipe is closed
+// with CloseWithError.
+func (p *pipe) Done() <-chan struct{} {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ if p.donec == nil {
+ p.donec = make(chan struct{})
+ if p.err != nil || p.breakErr != nil {
+ // Already hit an error.
+ p.closeDoneLocked()
+ }
+ }
+ return p.donec
+}
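+
+// A sketch of the pipe backed by a bytes.Buffer (which satisfies
+// pipeBuffer): Read drains buffered data first, then surfaces the
+// error set by CloseWithError. buf is a scratch slice.
+//
+//    p := &pipe{b: new(bytes.Buffer)}
+//    p.Write([]byte("body"))
+//    p.CloseWithError(io.EOF)
+//    buf := make([]byte, 8)
+//    n, _ := p.Read(buf)   // n == 4, reads "body"
+//    _, err := p.Read(buf) // err == io.EOF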
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
new file mode 100644
index 0000000..1de8146
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -0,0 +1,2287 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: replace all <-sc.doneServing with reads from the stream's cw
+// instead, and make sure that on close we close all open
+// streams. then remove doneServing?
+
+// TODO: re-audit GOAWAY support. Consider each incoming frame type and
+// whether it should be ignored during graceful shutdown.
+
+// TODO: disconnect idle clients. GFE seems to do 4 minutes. make
+// configurable? or maximum number of idle clients and remove the
+// oldest?
+
+// TODO: turn off the serve goroutine when idle, so
+// an idle conn only has the readFrames goroutine active. (which could
+// also be optimized probably to pin less memory in crypto/tls). This
+// would involve tracking when the serve goroutine is active (atomic
+// int32 read/CAS probably?) and starting it up when frames arrive,
+// and shutting it down when all handlers exit. the occasional PING
+// packets could use time.AfterFunc to call sc.wakeStartServeLoop()
+// (which is a no-op if already running) and then queue the PING write
+// as normal. The serve loop would then exit in most cases (if no
+// Handlers running) and not be woken up again until the PING packet
+// returns.
+
+// TODO (maybe): add a mechanism for Handlers to go into
+// half-closed-local mode (rw.(io.Closer) test?) but not exit their
+// handler, and continue to be able to read from the
+// Request.Body. This would be a somewhat semantic change from HTTP/1
+// (or at least what we expose in net/http), so I'd probably want to
+// add it there too. For now, this package says that returning from
+// the Handler ServeHTTP function means you're both done reading and
+// done writing, without a way to stop just one or the other.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "net/textproto"
+ "net/url"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+)
+
+const (
+ prefaceTimeout = 10 * time.Second
+ firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
+ handlerChunkWriteSize = 4 << 10
+ defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
+)
+
+var (
+ errClientDisconnected = errors.New("client disconnected")
+ errClosedBody = errors.New("body closed by handler")
+ errHandlerComplete = errors.New("http2: request body closed due to handler exiting")
+ errStreamClosed = errors.New("http2: stream closed")
+)
+
+var responseWriterStatePool = sync.Pool{
+ New: func() interface{} {
+ rws := &responseWriterState{}
+ rws.bw = bufio.NewWriterSize(chunkWriter{rws}, handlerChunkWriteSize)
+ return rws
+ },
+}
+
+// Test hooks.
+var (
+ testHookOnConn func()
+ testHookGetServerConn func(*serverConn)
+ testHookOnPanicMu *sync.Mutex // nil except in tests
+ testHookOnPanic func(sc *serverConn, panicVal interface{}) (rePanic bool)
+)
+
+// Server is an HTTP/2 server.
+type Server struct {
+ // MaxHandlers limits the number of http.Handler ServeHTTP goroutines
+ // which may run at a time over all connections.
+ // Negative or zero means no limit.
+ // TODO: implement
+ MaxHandlers int
+
+ // MaxConcurrentStreams optionally specifies the number of
+ // concurrent streams that each client may have open at a
+ // time. This is unrelated to the number of http.Handler goroutines
+ // which may be active globally, which is MaxHandlers.
+ // If zero, MaxConcurrentStreams defaults to at least 100, per
+ // the HTTP/2 spec's recommendations.
+ MaxConcurrentStreams uint32
+
+ // MaxReadFrameSize optionally specifies the largest frame
+ // this server is willing to read. A valid value is between
+ // 16k and 16M, inclusive. If zero or otherwise invalid, a
+ // default value is used.
+ MaxReadFrameSize uint32
+
+ // PermitProhibitedCipherSuites, if true, permits the use of
+ // cipher suites prohibited by the HTTP/2 spec.
+ PermitProhibitedCipherSuites bool
+}
+
+func (s *Server) maxReadFrameSize() uint32 {
+ if v := s.MaxReadFrameSize; v >= minMaxFrameSize && v <= maxFrameSize {
+ return v
+ }
+ return defaultMaxReadFrameSize
+}
+
+func (s *Server) maxConcurrentStreams() uint32 {
+ if v := s.MaxConcurrentStreams; v > 0 {
+ return v
+ }
+ return defaultMaxStreams
+}
+
+// ConfigureServer adds HTTP/2 support to a net/http Server.
+//
+// The configuration conf may be nil.
+//
+// ConfigureServer must be called before s begins serving.
+func ConfigureServer(s *http.Server, conf *Server) error {
+ if conf == nil {
+ conf = new(Server)
+ }
+
+ if s.TLSConfig == nil {
+ s.TLSConfig = new(tls.Config)
+ } else if s.TLSConfig.CipherSuites != nil {
+ // If they already provided a CipherSuite list, return
+ // an error if it has a bad order or is missing
+ // ECDHE_RSA_WITH_AES_128_GCM_SHA256.
+ const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
+ haveRequired := false
+ sawBad := false
+ for i, cs := range s.TLSConfig.CipherSuites {
+ if cs == requiredCipher {
+ haveRequired = true
+ }
+ if isBadCipher(cs) {
+ sawBad = true
+ } else if sawBad {
+ return fmt.Errorf("http2: TLSConfig.CipherSuites index %d contains an HTTP/2-approved cipher suite (%#04x), but it comes after unapproved cipher suites. With this configuration, clients that don't support previous, approved cipher suites may be given an unapproved one and reject the connection.", i, cs)
+ }
+ }
+ if !haveRequired {
+ return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256")
+ }
+ }
+
+ // Note: not setting MinVersion to tls.VersionTLS12,
+ // as we don't want to interfere with HTTP/1.1 traffic
+ // on the user's server. We enforce TLS 1.2 later once
+ // we accept a connection. Ideally this should be done
+ // during next-proto selection, but using TLS <1.2 with
+ // HTTP/2 is still the client's bug.
+
+ s.TLSConfig.PreferServerCipherSuites = true
+
+ haveNPN := false
+ for _, p := range s.TLSConfig.NextProtos {
+ if p == NextProtoTLS {
+ haveNPN = true
+ break
+ }
+ }
+ if !haveNPN {
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, NextProtoTLS)
+ }
+ // h2-14 is temporary (as of 2015-03-05) while we wait for all browsers
+ // to switch to "h2".
+ s.TLSConfig.NextProtos = append(s.TLSConfig.NextProtos, "h2-14")
+
+ if s.TLSNextProto == nil {
+ s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
+ }
+ protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ if testHookOnConn != nil {
+ testHookOnConn()
+ }
+ conf.ServeConn(c, &ServeConnOpts{
+ Handler: h,
+ BaseConfig: hs,
+ })
+ }
+ s.TLSNextProto[NextProtoTLS] = protoHandler
+ s.TLSNextProto["h2-14"] = protoHandler // temporary; see above.
+ return nil
+}
+
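+// A hypothetical caller-side sketch (mux and the certificate paths are
+// placeholders, not part of this file):
+//
+//    srv := &http.Server{Addr: ":443", Handler: mux}
+//    if err := http2.ConfigureServer(srv, nil); err != nil {
+//        log.Fatal(err)
+//    }
+//    log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
+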
+// ServeConnOpts are options for the Server.ServeConn method.
+type ServeConnOpts struct {
+ // BaseConfig optionally sets the base configuration
+ // for values. If nil, defaults are used.
+ BaseConfig *http.Server
+
+ // Handler specifies which handler to use for processing
+ // requests. If nil, BaseConfig.Handler is used. If BaseConfig
+ // or BaseConfig.Handler is nil, http.DefaultServeMux is used.
+ Handler http.Handler
+}
+
+func (o *ServeConnOpts) baseConfig() *http.Server {
+ if o != nil && o.BaseConfig != nil {
+ return o.BaseConfig
+ }
+ return new(http.Server)
+}
+
+func (o *ServeConnOpts) handler() http.Handler {
+ if o != nil {
+ if o.Handler != nil {
+ return o.Handler
+ }
+ if o.BaseConfig != nil && o.BaseConfig.Handler != nil {
+ return o.BaseConfig.Handler
+ }
+ }
+ return http.DefaultServeMux
+}
+
+// ServeConn serves HTTP/2 requests on the provided connection and
+// blocks until the connection is no longer readable.
+//
+// ServeConn starts speaking HTTP/2 assuming that c has not had any
+// reads or writes. It writes its initial settings frame and expects
+// to be able to read the preface and settings frame from the
+// client. If c has a ConnectionState method like a *tls.Conn, the
+// ConnectionState is used to verify the TLS ciphersuite and to set
+// the Request.TLS field in Handlers.
+//
+// ServeConn does not support h2c by itself. Any h2c support must be
+// implemented in terms of providing a suitably-behaving net.Conn.
+//
+// The opts parameter is optional. If nil, default values are used.
+func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+ baseCtx, cancel := serverConnBaseContext(c, opts)
+ defer cancel()
+
+ sc := &serverConn{
+ srv: s,
+ hs: opts.baseConfig(),
+ conn: c,
+ baseCtx: baseCtx,
+ remoteAddrStr: c.RemoteAddr().String(),
+ bw: newBufferedWriter(c),
+ handler: opts.handler(),
+ streams: make(map[uint32]*stream),
+ readFrameCh: make(chan readFrameResult),
+ wantWriteFrameCh: make(chan frameWriteMsg, 8),
+ wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync
+ bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way
+ doneServing: make(chan struct{}),
+ advMaxStreams: s.maxConcurrentStreams(),
+ writeSched: writeScheduler{
+ maxFrameSize: initialMaxFrameSize,
+ },
+ initialWindowSize: initialWindowSize,
+ headerTableSize: initialHeaderTableSize,
+ serveG: newGoroutineLock(),
+ pushEnabled: true,
+ }
+
+ sc.flow.add(initialWindowSize)
+ sc.inflow.add(initialWindowSize)
+ sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
+
+ fr := NewFramer(sc.bw, c)
+ fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ fr.MaxHeaderListSize = sc.maxHeaderListSize()
+ fr.SetMaxReadFrameSize(s.maxReadFrameSize())
+ sc.framer = fr
+
+ if tc, ok := c.(connectionStater); ok {
+ sc.tlsState = new(tls.ConnectionState)
+ *sc.tlsState = tc.ConnectionState()
+ // 9.2 Use of TLS Features
+ // An implementation of HTTP/2 over TLS MUST use TLS
+ // 1.2 or higher with the restrictions on feature set
+ // and cipher suite described in this section. Due to
+ // implementation limitations, it might not be
+ // possible to fail TLS negotiation. An endpoint MUST
+ // immediately terminate an HTTP/2 connection that
+ // does not meet the TLS requirements described in
+ // this section with a connection error (Section
+ // 5.4.1) of type INADEQUATE_SECURITY.
+ if sc.tlsState.Version < tls.VersionTLS12 {
+ sc.rejectConn(ErrCodeInadequateSecurity, "TLS version too low")
+ return
+ }
+
+ if sc.tlsState.ServerName == "" {
+ // Client must use SNI, but we don't enforce that anymore,
+ // since it was causing problems when connecting to bare IP
+ // addresses during development.
+ //
+ // TODO: optionally enforce? Or enforce at the time we receive
+ // a new request, and verify that the ServerName matches the :authority?
+ // But that precludes proxy situations, perhaps.
+ //
+ // So for now, do nothing here again.
+ }
+
+ if !s.PermitProhibitedCipherSuites && isBadCipher(sc.tlsState.CipherSuite) {
+ // "Endpoints MAY choose to generate a connection error
+ // (Section 5.4.1) of type INADEQUATE_SECURITY if one of
+ // the prohibited cipher suites are negotiated."
+ //
+ // We choose that. In my opinion, the spec is weak
+ // here. It also says both parties must support at least
+ // TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 so there's no
+ // excuses here. If we really must, we could allow an
+ // "AllowInsecureWeakCiphers" option on the server later.
+ // Let's see how it plays out first.
+ sc.rejectConn(ErrCodeInadequateSecurity, fmt.Sprintf("Prohibited TLS 1.2 Cipher Suite: %x", sc.tlsState.CipherSuite))
+ return
+ }
+ }
+
+ if hook := testHookGetServerConn; hook != nil {
+ hook(sc)
+ }
+ sc.serve()
+}
+
+// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec.
+func isBadCipher(cipher uint16) bool {
+ switch cipher {
+ case tls.TLS_RSA_WITH_RC4_128_SHA,
+ tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_RSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA,
+ tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+ tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA:
+ // Reject cipher suites from Appendix A.
+ // "This list includes those cipher suites that do not
+ // offer an ephemeral key exchange and those that are
+ // based on the TLS null, stream or block cipher type"
+ return true
+ default:
+ return false
+ }
+}
+
+func (sc *serverConn) rejectConn(err ErrCode, debug string) {
+ sc.vlogf("http2: server rejecting conn: %v, %s", err, debug)
+ // ignoring errors. hanging up anyway.
+ sc.framer.WriteGoAway(0, err, []byte(debug))
+ sc.bw.Flush()
+ sc.conn.Close()
+}
+
+type serverConn struct {
+ // Immutable:
+ srv *Server
+ hs *http.Server
+ conn net.Conn
+ bw *bufferedWriter // writing to conn
+ handler http.Handler
+ baseCtx contextContext
+ framer *Framer
+ doneServing chan struct{} // closed when serverConn.serve ends
+ readFrameCh chan readFrameResult // written by serverConn.readFrames
+ wantWriteFrameCh chan frameWriteMsg // from handlers -> serve
+ wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes
+ bodyReadCh chan bodyReadMsg // from handlers -> serve
+ testHookCh chan func(int) // code to run on the serve loop
+ flow flow // conn-wide (not stream-specific) outbound flow control
+ inflow flow // conn-wide inbound flow control
+ tlsState *tls.ConnectionState // shared by all handlers, like net/http
+ remoteAddrStr string
+
+ // Everything following is owned by the serve loop; use serveG.check():
+ serveG goroutineLock // used to verify funcs are on serve()
+ pushEnabled bool
+ sawFirstSettings bool // got the initial SETTINGS frame after the preface
+ needToSendSettingsAck bool
+ unackedSettings int // how many SETTINGS have we sent without ACKs?
+ clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
+ advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised to the client
+ curOpenStreams uint32 // client's number of open streams
+ maxStreamID uint32 // max ever seen
+ streams map[uint32]*stream
+ initialWindowSize int32
+ headerTableSize uint32
+ peerMaxHeaderListSize uint32 // zero means unknown (default)
+ canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case
+ writingFrame bool // started write goroutine but haven't heard back on wroteFrameCh
+ needsFrameFlush bool // last frame write wasn't a flush
+ writeSched writeScheduler
+ inGoAway bool // we've started to or sent GOAWAY
+ needToSendGoAway bool // we need to schedule a GOAWAY frame write
+ goAwayCode ErrCode
+ shutdownTimerCh <-chan time.Time // nil until used
+ shutdownTimer *time.Timer // nil until used
+ freeRequestBodyBuf []byte // if non-nil, a free initialWindowSize buffer for getRequestBodyBuf
+
+ // Owned by the writeFrameAsync goroutine:
+ headerWriteBuf bytes.Buffer
+ hpackEncoder *hpack.Encoder
+}
+
+func (sc *serverConn) maxHeaderListSize() uint32 {
+ n := sc.hs.MaxHeaderBytes
+ if n <= 0 {
+ n = http.DefaultMaxHeaderBytes
+ }
+ // http2's count is in a slightly different unit and includes 32 bytes per pair.
+ // So, take the net/http.Server value and pad it up a bit, assuming 10 headers.
+ const perFieldOverhead = 32 // per http2 spec
+ const typicalHeaders = 10 // conservative
+ return uint32(n + typicalHeaders*perFieldOverhead)
+}
+
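+// With the net/http default of 1<<20 for MaxHeaderBytes, the formula
+// above advertises 1048576 + 10*32 = 1048896 bytes.
+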
+// stream represents a stream. This is the minimal metadata needed by
+// the serve goroutine. Most of the actual stream state is owned by
+// the http.Handler's goroutine in the responseWriter. Because the
+// responseWriter's responseWriterState is recycled at the end of a
+// handler, this struct intentionally has no pointer to the
+// *responseWriter{,State} itself, as the Handler ending nils out the
+// responseWriter's state field.
+type stream struct {
+ // immutable:
+ sc *serverConn
+ id uint32
+ body *pipe // non-nil if expecting DATA frames
+ cw closeWaiter // closed when the stream transitions to the closed state
+ ctx contextContext
+ cancelCtx func()
+
+ // owned by serverConn's serve loop:
+ bodyBytes int64 // body bytes seen so far
+ declBodyBytes int64 // or -1 if undeclared
+ flow flow // limits writing from Handler to client
+ inflow flow // what the client is allowed to POST/etc to us
+ parent *stream // or nil
+ numTrailerValues int64
+ weight uint8
+ state streamState
+ sentReset bool // only true once detached from streams map
+ gotReset bool // only true once detached from streams map
+ gotTrailerHeader bool // HEADER frame for trailers was seen
+ wroteHeaders bool // whether we wrote headers (not status 100)
+ reqBuf []byte
+
+ trailer http.Header // accumulated trailers
+ reqTrailer http.Header // handler's Request.Trailer
+}
+
+func (sc *serverConn) Framer() *Framer { return sc.framer }
+func (sc *serverConn) CloseConn() error { return sc.conn.Close() }
+func (sc *serverConn) Flush() error { return sc.bw.Flush() }
+func (sc *serverConn) HeaderEncoder() (*hpack.Encoder, *bytes.Buffer) {
+ return sc.hpackEncoder, &sc.headerWriteBuf
+}
+
+func (sc *serverConn) state(streamID uint32) (streamState, *stream) {
+ sc.serveG.check()
+ // http://http2.github.io/http2-spec/#rfc.section.5.1
+ if st, ok := sc.streams[streamID]; ok {
+ return st.state, st
+ }
+ // "The first use of a new stream identifier implicitly closes all
+ // streams in the "idle" state that might have been initiated by
+ // that peer with a lower-valued stream identifier. For example, if
+ // a client sends a HEADERS frame on stream 7 without ever sending a
+ // frame on stream 5, then stream 5 transitions to the "closed"
+ // state when the first frame for stream 7 is sent or received."
+ if streamID <= sc.maxStreamID {
+ return stateClosed, nil
+ }
+ return stateIdle, nil
+}
+
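+// For example, if a client opens stream 7 without ever using stream 5,
+// a later lookup of stream 5 reports stateClosed (5 <= sc.maxStreamID),
+// while stream 9 still reports stateIdle.
+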
+// setConnState calls the net/http ConnState hook for this connection, if configured.
+// Note that the net/http package does StateNew and StateClosed for us.
+// There is currently no plan for StateHijacked or hijacking HTTP/2 connections.
+func (sc *serverConn) setConnState(state http.ConnState) {
+ if sc.hs.ConnState != nil {
+ sc.hs.ConnState(sc.conn, state)
+ }
+}
+
+func (sc *serverConn) vlogf(format string, args ...interface{}) {
+ if VerboseLogs {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *serverConn) logf(format string, args ...interface{}) {
+ if lg := sc.hs.ErrorLog; lg != nil {
+ lg.Printf(format, args...)
+ } else {
+ log.Printf(format, args...)
+ }
+}
+
+// errno returns v's underlying uintptr, else 0.
+//
+// TODO: remove this helper function once http2 can use build
+// tags. See comment in isClosedConnError.
+func errno(v error) uintptr {
+ if rv := reflect.ValueOf(v); rv.Kind() == reflect.Uintptr {
+ return uintptr(rv.Uint())
+ }
+ return 0
+}
+
+// isClosedConnError reports whether err is an error from use of a closed
+// network connection.
+func isClosedConnError(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ // TODO: remove this string search and be more like the Windows
+ // case below. That might involve modifying the standard library
+ // to return better error types.
+ str := err.Error()
+ if strings.Contains(str, "use of closed network connection") {
+ return true
+ }
+
+ // TODO(bradfitz): x/tools/cmd/bundle doesn't really support
+ // build tags, so I can't make an http2_windows.go file with
+ // Windows-specific stuff. Fix that and move this, once we
+ // have a way to bundle this into std's net/http somehow.
+ if runtime.GOOS == "windows" {
+ if oe, ok := err.(*net.OpError); ok && oe.Op == "read" {
+ if se, ok := oe.Err.(*os.SyscallError); ok && se.Syscall == "wsarecv" {
+ const WSAECONNABORTED = 10053
+ const WSAECONNRESET = 10054
+ if n := errno(se.Err); n == WSAECONNRESET || n == WSAECONNABORTED {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
+
+func (sc *serverConn) condlogf(err error, format string, args ...interface{}) {
+ if err == nil {
+ return
+ }
+ if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) {
+ // Boring, expected errors.
+ sc.vlogf(format, args...)
+ } else {
+ sc.logf(format, args...)
+ }
+}
+
+func (sc *serverConn) canonicalHeader(v string) string {
+ sc.serveG.check()
+ cv, ok := commonCanonHeader[v]
+ if ok {
+ return cv
+ }
+ cv, ok = sc.canonHeader[v]
+ if ok {
+ return cv
+ }
+ if sc.canonHeader == nil {
+ sc.canonHeader = make(map[string]string)
+ }
+ cv = http.CanonicalHeaderKey(v)
+ sc.canonHeader[v] = cv
+ return cv
+}
+
+type readFrameResult struct {
+ f Frame // valid until readMore is called
+ err error
+
+ // readMore should be called once the consumer no longer needs or
+ // retains f. After readMore, f is invalid and more frames can be
+ // read.
+ readMore func()
+}
+
+// readFrames is the loop that reads incoming frames.
+// It takes care to only read one frame at a time, blocking until the
+// consumer is done with the frame.
+// It's run on its own goroutine.
+func (sc *serverConn) readFrames() {
+ gate := make(gate)
+ gateDone := gate.Done
+ for {
+ f, err := sc.framer.ReadFrame()
+ select {
+ case sc.readFrameCh <- readFrameResult{f, err, gateDone}:
+ case <-sc.doneServing:
+ return
+ }
+ select {
+ case <-gate:
+ case <-sc.doneServing:
+ return
+ }
+ if terminalReadFrameError(err) {
+ return
+ }
+ }
+}
+
+// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
+type frameWriteResult struct {
+ wm frameWriteMsg // what was written (or attempted)
+ err error // result of the writeFrame call
+}
+
+// writeFrameAsync runs in its own goroutine and writes a single frame
+// and then reports when it's done.
+// At most one goroutine can be running writeFrameAsync at a time per
+// serverConn.
+func (sc *serverConn) writeFrameAsync(wm frameWriteMsg) {
+ err := wm.write.writeFrame(sc)
+ sc.wroteFrameCh <- frameWriteResult{wm, err}
+}
+
+func (sc *serverConn) closeAllStreamsOnConnClose() {
+ sc.serveG.check()
+ for _, st := range sc.streams {
+ sc.closeStream(st, errClientDisconnected)
+ }
+}
+
+func (sc *serverConn) stopShutdownTimer() {
+ sc.serveG.check()
+ if t := sc.shutdownTimer; t != nil {
+ t.Stop()
+ }
+}
+
+func (sc *serverConn) notePanic() {
+ // Note: this is for serverConn.serve panicking, not http.Handler code.
+ if testHookOnPanicMu != nil {
+ testHookOnPanicMu.Lock()
+ defer testHookOnPanicMu.Unlock()
+ }
+ if testHookOnPanic != nil {
+ if e := recover(); e != nil {
+ if testHookOnPanic(sc, e) {
+ panic(e)
+ }
+ }
+ }
+}
+
+func (sc *serverConn) serve() {
+ sc.serveG.check()
+ defer sc.notePanic()
+ defer sc.conn.Close()
+ defer sc.closeAllStreamsOnConnClose()
+ defer sc.stopShutdownTimer()
+ defer close(sc.doneServing) // unblocks handlers trying to send
+
+ if VerboseLogs {
+ sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
+ }
+
+ sc.writeFrame(frameWriteMsg{
+ write: writeSettings{
+ {SettingMaxFrameSize, sc.srv.maxReadFrameSize()},
+ {SettingMaxConcurrentStreams, sc.advMaxStreams},
+ {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+
+ // TODO: more actual settings, notably
+ // SettingInitialWindowSize, but then we also
+ // want to bump up the conn window size the
+ // same amount here right after the settings
+ },
+ })
+ sc.unackedSettings++
+
+ if err := sc.readPreface(); err != nil {
+ sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err)
+ return
+ }
+ // Now that we've got the preface, get us out of the
+ // "StateNew" state. We can't go directly to idle, though.
+ // Active means we read some data and anticipate a request. We'll
+ // do another Active when we get a HEADERS frame.
+ sc.setConnState(http.StateActive)
+ sc.setConnState(http.StateIdle)
+
+ go sc.readFrames() // closed by defer sc.conn.Close above
+
+ settingsTimer := time.NewTimer(firstSettingsTimeout)
+ loopNum := 0
+ for {
+ loopNum++
+ select {
+ case wm := <-sc.wantWriteFrameCh:
+ sc.writeFrame(wm)
+ case res := <-sc.wroteFrameCh:
+ sc.wroteFrame(res)
+ case res := <-sc.readFrameCh:
+ if !sc.processFrameFromReader(res) {
+ return
+ }
+ res.readMore()
+ if settingsTimer.C != nil {
+ settingsTimer.Stop()
+ settingsTimer.C = nil
+ }
+ case m := <-sc.bodyReadCh:
+ sc.noteBodyRead(m.st, m.n)
+ case <-settingsTimer.C:
+ sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr())
+ return
+ case <-sc.shutdownTimerCh:
+ sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr())
+ return
+ case fn := <-sc.testHookCh:
+ fn(loopNum)
+ }
+ }
+}
+
+// readPreface reads the ClientPreface greeting from the peer
+// or returns an error on timeout or an invalid greeting.
+func (sc *serverConn) readPreface() error {
+ errc := make(chan error, 1)
+ go func() {
+ // Read the client preface
+ buf := make([]byte, len(ClientPreface))
+ if _, err := io.ReadFull(sc.conn, buf); err != nil {
+ errc <- err
+ } else if !bytes.Equal(buf, clientPreface) {
+ errc <- fmt.Errorf("bogus greeting %q", buf)
+ } else {
+ errc <- nil
+ }
+ }()
+ timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
+ defer timer.Stop()
+ select {
+ case <-timer.C:
+ return errors.New("timeout waiting for client preface")
+ case err := <-errc:
+ if err == nil {
+ if VerboseLogs {
+ sc.vlogf("http2: server: client %v said hello", sc.conn.RemoteAddr())
+ }
+ }
+ return err
+ }
+}
+
+var errChanPool = sync.Pool{
+ New: func() interface{} { return make(chan error, 1) },
+}
+
+var writeDataPool = sync.Pool{
+ New: func() interface{} { return new(writeData) },
+}
+
+// writeDataFromHandler writes DATA response frames from a handler on
+// the given stream.
+func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
+ ch := errChanPool.Get().(chan error)
+ writeArg := writeDataPool.Get().(*writeData)
+ *writeArg = writeData{stream.id, data, endStream}
+ err := sc.writeFrameFromHandler(frameWriteMsg{
+ write: writeArg,
+ stream: stream,
+ done: ch,
+ })
+ if err != nil {
+ return err
+ }
+ var frameWriteDone bool // the frame write is done (successfully or not)
+ select {
+ case err = <-ch:
+ frameWriteDone = true
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-stream.cw:
+ // If both ch and stream.cw were ready (as might
+ // happen on the final Write after an http.Handler
+ // ends), prefer the write result. Otherwise this
+ // might just be us successfully closing the stream.
+ // The writeFrameAsync and serve goroutines guarantee
+ // that the ch send will happen before the stream.cw
+ // close.
+ select {
+ case err = <-ch:
+ frameWriteDone = true
+ default:
+ return errStreamClosed
+ }
+ }
+ errChanPool.Put(ch)
+ if frameWriteDone {
+ writeDataPool.Put(writeArg)
+ }
+ return err
+}
+
+// writeFrameFromHandler sends wm to sc.wantWriteFrameCh, but aborts
+// if the connection has gone away.
+//
+// This must not be run from the serve goroutine itself, else it might
+// deadlock writing to sc.wantWriteFrameCh (which is only mildly
+// buffered and is read by serve itself). If you're on the serve
+// goroutine, call writeFrame instead.
+func (sc *serverConn) writeFrameFromHandler(wm frameWriteMsg) error {
+ sc.serveG.checkNotOn() // NOT
+ select {
+ case sc.wantWriteFrameCh <- wm:
+ return nil
+ case <-sc.doneServing:
+ // Serve loop is gone.
+ // Client has closed their connection to the server.
+ return errClientDisconnected
+ }
+}
+
+// writeFrame schedules a frame to write and sends it if there's nothing
+// already being written.
+//
+// There is no pushback here (the serve goroutine never blocks). It's
+// the http.Handlers that block, waiting for their previous frames to
+// make it onto the wire.
+//
+// If you're not on the serve goroutine, use writeFrameFromHandler instead.
+func (sc *serverConn) writeFrame(wm frameWriteMsg) {
+ sc.serveG.check()
+
+ var ignoreWrite bool
+
+ // Don't send a 100-continue response if we've already sent headers.
+ // See golang.org/issue/14030.
+ switch wm.write.(type) {
+ case *writeResHeaders:
+ wm.stream.wroteHeaders = true
+ case write100ContinueHeadersFrame:
+ if wm.stream.wroteHeaders {
+ ignoreWrite = true
+ }
+ }
+
+ if !ignoreWrite {
+ sc.writeSched.add(wm)
+ }
+ sc.scheduleFrameWrite()
+}
+
+// startFrameWrite starts a goroutine to write wm (in a separate
+// goroutine since that might block on the network), and updates the
+// serve goroutine's state about the world, updated from info in wm.
+func (sc *serverConn) startFrameWrite(wm frameWriteMsg) {
+ sc.serveG.check()
+ if sc.writingFrame {
+ panic("internal error: can only be writing one frame at a time")
+ }
+
+ st := wm.stream
+ if st != nil {
+ switch st.state {
+ case stateHalfClosedLocal:
+ panic("internal error: attempt to send frame on half-closed-local stream")
+ case stateClosed:
+ if st.sentReset || st.gotReset {
+ // Skip this frame.
+ sc.scheduleFrameWrite()
+ return
+ }
+ panic(fmt.Sprintf("internal error: attempt to send a write %v on a closed stream", wm))
+ }
+ }
+
+ sc.writingFrame = true
+ sc.needsFrameFlush = true
+ go sc.writeFrameAsync(wm)
+}
+
+// errHandlerPanicked is the error given to any callers blocked in a read from
+// Request.Body when the main goroutine panics. Since most handlers read in the
+// main ServeHTTP goroutine, this will show up rarely.
+var errHandlerPanicked = errors.New("http2: handler panicked")
+
+// wroteFrame is called on the serve goroutine with the result of
+// whatever happened on writeFrameAsync.
+func (sc *serverConn) wroteFrame(res frameWriteResult) {
+ sc.serveG.check()
+ if !sc.writingFrame {
+ panic("internal error: expected to be already writing a frame")
+ }
+ sc.writingFrame = false
+
+ wm := res.wm
+ st := wm.stream
+
+ closeStream := endsStream(wm.write)
+
+ if _, ok := wm.write.(handlerPanicRST); ok {
+ sc.closeStream(st, errHandlerPanicked)
+ }
+
+ // Reply (if requested) to the blocked ServeHTTP goroutine.
+ if ch := wm.done; ch != nil {
+ select {
+ case ch <- res.err:
+ default:
+ panic(fmt.Sprintf("unbuffered done channel passed in for type %T", wm.write))
+ }
+ }
+ wm.write = nil // prevent use (assume it's tainted after wm.done send)
+
+ if closeStream {
+ if st == nil {
+ panic("internal error: expecting non-nil stream")
+ }
+ switch st.state {
+ case stateOpen:
+ // Here we would go to stateHalfClosedLocal in
+ // theory, but since our handler is done and
+ // the net/http package provides no mechanism
+ // for finishing writing to a ResponseWriter
+ // while still reading data (see possible TODO
+ // at top of this file), we go into closed
+ // state here anyway, after telling the peer
+ // we're hanging up on them.
+ st.state = stateHalfClosedLocal // won't last long, but necessary for closeStream via resetStream
+ errCancel := StreamError{st.id, ErrCodeCancel}
+ sc.resetStream(errCancel)
+ case stateHalfClosedRemote:
+ sc.closeStream(st, errHandlerComplete)
+ }
+ }
+
+ sc.scheduleFrameWrite()
+}
+
+// scheduleFrameWrite tickles the frame writing scheduler.
+//
+// If a frame is already being written, nothing happens. This will be called again
+// when the frame is done being written.
+//
+// If a frame isn't being written and we need to send one, the best frame
+// to send is selected, preferring first things that aren't
+// stream-specific (e.g. ACKing settings), and then finding the
+// highest priority stream.
+//
+// If a frame isn't being written and there's nothing else to send, we
+// flush the write buffer.
+func (sc *serverConn) scheduleFrameWrite() {
+ sc.serveG.check()
+ if sc.writingFrame {
+ return
+ }
+ if sc.needToSendGoAway {
+ sc.needToSendGoAway = false
+ sc.startFrameWrite(frameWriteMsg{
+ write: &writeGoAway{
+ maxStreamID: sc.maxStreamID,
+ code: sc.goAwayCode,
+ },
+ })
+ return
+ }
+ if sc.needToSendSettingsAck {
+ sc.needToSendSettingsAck = false
+ sc.startFrameWrite(frameWriteMsg{write: writeSettingsAck{}})
+ return
+ }
+ if !sc.inGoAway {
+ if wm, ok := sc.writeSched.take(); ok {
+ sc.startFrameWrite(wm)
+ return
+ }
+ }
+ if sc.needsFrameFlush {
+ sc.startFrameWrite(frameWriteMsg{write: flushFrameWriter{}})
+ sc.needsFrameFlush = false // after startFrameWrite, since it sets this true
+ return
+ }
+}
+
+func (sc *serverConn) goAway(code ErrCode) {
+ sc.serveG.check()
+ if sc.inGoAway {
+ return
+ }
+ if code != ErrCodeNo {
+ sc.shutDownIn(250 * time.Millisecond)
+ } else {
+ // TODO: configurable
+ sc.shutDownIn(1 * time.Second)
+ }
+ sc.inGoAway = true
+ sc.needToSendGoAway = true
+ sc.goAwayCode = code
+ sc.scheduleFrameWrite()
+}
+
+func (sc *serverConn) shutDownIn(d time.Duration) {
+ sc.serveG.check()
+ sc.shutdownTimer = time.NewTimer(d)
+ sc.shutdownTimerCh = sc.shutdownTimer.C
+}
+
+func (sc *serverConn) resetStream(se StreamError) {
+ sc.serveG.check()
+ sc.writeFrame(frameWriteMsg{write: se})
+ if st, ok := sc.streams[se.StreamID]; ok {
+ st.sentReset = true
+ sc.closeStream(st, se)
+ }
+}
+
+// processFrameFromReader processes the serve loop's read from readFrameCh from the
+// frame-reading goroutine.
+// processFrameFromReader returns whether the connection should be kept open.
+func (sc *serverConn) processFrameFromReader(res readFrameResult) bool {
+ sc.serveG.check()
+ err := res.err
+ if err != nil {
+ if err == ErrFrameTooLarge {
+ sc.goAway(ErrCodeFrameSize)
+ return true // goAway will close the loop
+ }
+ clientGone := err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err)
+ if clientGone {
+ // TODO: could we also get into this state if
+ // the peer does a half close
+ // (e.g. CloseWrite) because they're done
+ // sending frames but they're still wanting
+ // our open replies? Investigate.
+ // TODO: add CloseWrite to crypto/tls.Conn first
+ // so we have a way to test this? I suppose
+ // just for testing we could have a non-TLS mode.
+ return false
+ }
+ } else {
+ f := res.f
+ if VerboseLogs {
+ sc.vlogf("http2: server read frame %v", summarizeFrame(f))
+ }
+ err = sc.processFrame(f)
+ if err == nil {
+ return true
+ }
+ }
+
+ switch ev := err.(type) {
+ case StreamError:
+ sc.resetStream(ev)
+ return true
+ case goAwayFlowError:
+ sc.goAway(ErrCodeFlowControl)
+ return true
+ case ConnectionError:
+ sc.logf("http2: server connection error from %v: %v", sc.conn.RemoteAddr(), ev)
+ sc.goAway(ErrCode(ev))
+ return true // goAway will handle shutdown
+ default:
+ if res.err != nil {
+ sc.vlogf("http2: server closing client connection; error reading frame from client %s: %v", sc.conn.RemoteAddr(), err)
+ } else {
+ sc.logf("http2: server closing client connection: %v", err)
+ }
+ return false
+ }
+}
+
+func (sc *serverConn) processFrame(f Frame) error {
+ sc.serveG.check()
+
+ // First frame received must be SETTINGS.
+ if !sc.sawFirstSettings {
+ if _, ok := f.(*SettingsFrame); !ok {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ sc.sawFirstSettings = true
+ }
+
+ switch f := f.(type) {
+ case *SettingsFrame:
+ return sc.processSettings(f)
+ case *MetaHeadersFrame:
+ return sc.processHeaders(f)
+ case *WindowUpdateFrame:
+ return sc.processWindowUpdate(f)
+ case *PingFrame:
+ return sc.processPing(f)
+ case *DataFrame:
+ return sc.processData(f)
+ case *RSTStreamFrame:
+ return sc.processResetStream(f)
+ case *PriorityFrame:
+ return sc.processPriority(f)
+ case *PushPromiseFrame:
+ // A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
+ // frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ return ConnectionError(ErrCodeProtocol)
+ default:
+ sc.vlogf("http2: server ignoring frame: %v", f.Header())
+ return nil
+ }
+}
+
+func (sc *serverConn) processPing(f *PingFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ // 6.7 PING: " An endpoint MUST NOT respond to PING frames
+ // containing this flag."
+ return nil
+ }
+ if f.StreamID != 0 {
+ // "PING frames are not associated with any individual
+ // stream. If a PING frame is received with a stream
+ // identifier field value other than 0x0, the recipient MUST
+ // respond with a connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+ }
+ sc.writeFrame(frameWriteMsg{write: writePingAck{f}})
+ return nil
+}
+
+func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
+ sc.serveG.check()
+ switch {
+ case f.StreamID != 0: // stream-level flow control
+ st := sc.streams[f.StreamID]
+ if st == nil {
+ // "WINDOW_UPDATE can be sent by a peer that has sent a
+ // frame bearing the END_STREAM flag. This means that a
+ // receiver could receive a WINDOW_UPDATE frame on a "half
+ // closed (remote)" or "closed" stream. A receiver MUST
+ // NOT treat this as an error, see Section 5.1."
+ return nil
+ }
+ if !st.flow.add(int32(f.Increment)) {
+ return StreamError{f.StreamID, ErrCodeFlowControl}
+ }
+ default: // connection-level flow control
+ if !sc.flow.add(int32(f.Increment)) {
+ return goAwayFlowError{}
+ }
+ }
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
+ sc.serveG.check()
+
+ state, st := sc.state(f.StreamID)
+ if state == stateIdle {
+ // 6.4 "RST_STREAM frames MUST NOT be sent for a
+ // stream in the "idle" state. If a RST_STREAM frame
+ // identifying an idle stream is received, the
+ // recipient MUST treat this as a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if st != nil {
+ st.gotReset = true
+ st.cancelCtx()
+ sc.closeStream(st, StreamError{f.StreamID, f.ErrCode})
+ }
+ return nil
+}
+
+func (sc *serverConn) closeStream(st *stream, err error) {
+ sc.serveG.check()
+ if st.state == stateIdle || st.state == stateClosed {
+ panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state))
+ }
+ st.state = stateClosed
+ sc.curOpenStreams--
+ if sc.curOpenStreams == 0 {
+ sc.setConnState(http.StateIdle)
+ }
+ delete(sc.streams, st.id)
+ if p := st.body; p != nil {
+ p.CloseWithError(err)
+ }
+ st.cw.Close() // signals Handler's CloseNotifier, unblocks writes, etc
+ sc.writeSched.forgetStream(st.id)
+ if st.reqBuf != nil {
+ // Stash this request body buffer (64k) away for reuse
+ // by a future POST/PUT/etc.
+ //
+ // TODO(bradfitz): share on the server? sync.Pool?
+ // Server requires locks and might hurt contention.
+ // sync.Pool might work, or might be worse, depending
+ // on goroutine CPU migrations. (get and put on
+ // separate CPUs). Maybe a mix of strategies. But
+ // this is an easy win for now.
+ sc.freeRequestBodyBuf = st.reqBuf
+ }
+}
+
+func (sc *serverConn) processSettings(f *SettingsFrame) error {
+ sc.serveG.check()
+ if f.IsAck() {
+ sc.unackedSettings--
+ if sc.unackedSettings < 0 {
+ // Why is the peer ACKing settings we never sent?
+ // The spec doesn't mention this case, but
+ // hang up on them anyway.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ return nil
+ }
+ if err := f.ForeachSetting(sc.processSetting); err != nil {
+ return err
+ }
+ sc.needToSendSettingsAck = true
+ sc.scheduleFrameWrite()
+ return nil
+}
+
+func (sc *serverConn) processSetting(s Setting) error {
+ sc.serveG.check()
+ if err := s.Valid(); err != nil {
+ return err
+ }
+ if VerboseLogs {
+ sc.vlogf("http2: server processing setting %v", s)
+ }
+ switch s.ID {
+ case SettingHeaderTableSize:
+ sc.headerTableSize = s.Val
+ sc.hpackEncoder.SetMaxDynamicTableSize(s.Val)
+ case SettingEnablePush:
+ sc.pushEnabled = s.Val != 0
+ case SettingMaxConcurrentStreams:
+ sc.clientMaxStreams = s.Val
+ case SettingInitialWindowSize:
+ return sc.processSettingInitialWindowSize(s.Val)
+ case SettingMaxFrameSize:
+ sc.writeSched.maxFrameSize = s.Val
+ case SettingMaxHeaderListSize:
+ sc.peerMaxHeaderListSize = s.Val
+ default:
+ // Unknown setting: "An endpoint that receives a SETTINGS
+ // frame with any unknown or unsupported identifier MUST
+ // ignore that setting."
+ if VerboseLogs {
+ sc.vlogf("http2: server ignoring unknown setting %v", s)
+ }
+ }
+ return nil
+}
+
+func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
+ sc.serveG.check()
+ // Note: val already validated to be within range by
+ // processSetting's Valid call.
+
+ // "A SETTINGS frame can alter the initial flow control window
+ // size for all current streams. When the value of
+ // SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST
+ // adjust the size of all stream flow control windows that it
+ // maintains by the difference between the new value and the
+ // old value."
+ old := sc.initialWindowSize
+ sc.initialWindowSize = int32(val)
+ growth := sc.initialWindowSize - old // may be negative
+ for _, st := range sc.streams {
+ if !st.flow.add(growth) {
+ // 6.9.2 Initial Flow Control Window Size
+ // "An endpoint MUST treat a change to
+ // SETTINGS_INITIAL_WINDOW_SIZE that causes any flow
+ // control window to exceed the maximum size as a
+ // connection error (Section 5.4.1) of type
+ // FLOW_CONTROL_ERROR."
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ }
+ return nil
+}
+
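+// A worked example of the adjustment above: if the initial window was
+// 65535 and the client sends SETTINGS_INITIAL_WINDOW_SIZE = 131072,
+// growth is +65537 and every open stream's send window grows by that
+// amount; changing it back to 65535 shrinks each window by 65537,
+// possibly below zero.
+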
+func (sc *serverConn) processData(f *DataFrame) error {
+ sc.serveG.check()
+ // "If a DATA frame is received whose stream is not in "open"
+ // or "half closed (local)" state, the recipient MUST respond
+ // with a stream error (Section 5.4.2) of type STREAM_CLOSED."
+ id := f.Header().StreamID
+ st, ok := sc.streams[id]
+ if !ok || st.state != stateOpen || st.gotTrailerHeader {
+ // This includes sending a RST_STREAM if the stream is
+ // in stateHalfClosedLocal (which currently means that
+ // the http.Handler returned, so it's done reading &
+ // done writing). Try to stop the client from sending
+ // more DATA.
+ return StreamError{id, ErrCodeStreamClosed}
+ }
+ if st.body == nil {
+ panic("internal error: should have a body in this state")
+ }
+ data := f.Data()
+
+ // Sender sending more than they'd declared?
+ if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
+ st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
+ return StreamError{id, ErrCodeStreamClosed}
+ }
+ if len(data) > 0 {
+ // Check whether the client has flow control quota.
+ if int(st.inflow.available()) < len(data) {
+ return StreamError{id, ErrCodeFlowControl}
+ }
+ st.inflow.take(int32(len(data)))
+ wrote, err := st.body.Write(data)
+ if err != nil {
+ return StreamError{id, ErrCodeStreamClosed}
+ }
+ if wrote != len(data) {
+ panic("internal error: bad Writer")
+ }
+ st.bodyBytes += int64(len(data))
+ }
+ if f.StreamEnded() {
+ st.endStream()
+ }
+ return nil
+}
+
+// endStream closes a Request.Body's pipe. It is called when a DATA
+// frame says a request body is over (or after trailers).
+func (st *stream) endStream() {
+ sc := st.sc
+ sc.serveG.check()
+
+ if st.declBodyBytes != -1 && st.declBodyBytes != st.bodyBytes {
+ st.body.CloseWithError(fmt.Errorf("request declared a Content-Length of %d but only wrote %d bytes",
+ st.declBodyBytes, st.bodyBytes))
+ } else {
+ st.body.closeWithErrorAndCode(io.EOF, st.copyTrailersToHandlerRequest)
+ st.body.CloseWithError(io.EOF)
+ }
+ st.state = stateHalfClosedRemote
+}
+
+// copyTrailersToHandlerRequest is run in the Handler's goroutine in
+// its Request.Body.Read just before it gets io.EOF.
+func (st *stream) copyTrailersToHandlerRequest() {
+ for k, vv := range st.trailer {
+ if _, ok := st.reqTrailer[k]; ok {
+ // Only copy it over if it was pre-declared.
+ st.reqTrailer[k] = vv
+ }
+ }
+}
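+
+// A sketch of the pre-declaration rule above (hypothetical headers): if
+// the request arrived with "Trailer: Foo" (so st.reqTrailer has the key
+// "Foo"), and the peer's trailing HEADERS carry both Foo and Bar, only
+// Foo is copied into the handler's Request.Trailer; Bar is dropped.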
+
+func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
+ sc.serveG.check()
+ id := f.Header().StreamID
+ if sc.inGoAway {
+ // Ignore.
+ return nil
+ }
+ // http://http2.github.io/http2-spec/#rfc.section.5.1.1
+ // Streams initiated by a client MUST use odd-numbered stream
+ // identifiers. [...] An endpoint that receives an unexpected
+ // stream identifier MUST respond with a connection error
+ // (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id%2 != 1 {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ // A HEADERS frame can be used to create a new stream or
+ // send a trailer for an open one. If we already have a stream
+ // open, let it process its own HEADERS frame (trailers at this
+ // point, if it's valid).
+ st := sc.streams[f.Header().StreamID]
+ if st != nil {
+ return st.processTrailerHeaders(f)
+ }
+
+ // [...] The identifier of a newly established stream MUST be
+ // numerically greater than all streams that the initiating
+ // endpoint has opened or reserved. [...] An endpoint that
+ // receives an unexpected stream identifier MUST respond with
+ // a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
+ if id <= sc.maxStreamID {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ sc.maxStreamID = id
+
+ ctx, cancelCtx := contextWithCancel(sc.baseCtx)
+ st = &stream{
+ sc: sc,
+ id: id,
+ state: stateOpen,
+ ctx: ctx,
+ cancelCtx: cancelCtx,
+ }
+ if f.StreamEnded() {
+ st.state = stateHalfClosedRemote
+ }
+ st.cw.Init()
+
+ st.flow.conn = &sc.flow // link to conn-level counter
+ st.flow.add(sc.initialWindowSize)
+ st.inflow.conn = &sc.inflow // link to conn-level counter
+ st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings
+
+ sc.streams[id] = st
+ if f.HasPriority() {
+ adjustStreamPriority(sc.streams, st.id, f.Priority)
+ }
+ sc.curOpenStreams++
+ if sc.curOpenStreams == 1 {
+ sc.setConnState(http.StateActive)
+ }
+ if sc.curOpenStreams > sc.advMaxStreams {
+ // "Endpoints MUST NOT exceed the limit set by their
+ // peer. An endpoint that receives a HEADERS frame
+ // that causes their advertised concurrent stream
+ // limit to be exceeded MUST treat this as a stream
+ // error (Section 5.4.2) of type PROTOCOL_ERROR or
+ // REFUSED_STREAM."
+ if sc.unackedSettings == 0 {
+ // They should know better.
+ return StreamError{st.id, ErrCodeProtocol}
+ }
+ // Assume it's a network race, where they just haven't
+ // received our last SETTINGS update. But actually
+ // this can't happen yet, because we don't yet provide
+ // a way for users to adjust server parameters at
+ // runtime.
+ return StreamError{st.id, ErrCodeRefusedStream}
+ }
+
+ rw, req, err := sc.newWriterAndRequest(st, f)
+ if err != nil {
+ return err
+ }
+ st.reqTrailer = req.Trailer
+ if st.reqTrailer != nil {
+ st.trailer = make(http.Header)
+ }
+ st.body = req.Body.(*requestBody).pipe // may be nil
+ st.declBodyBytes = req.ContentLength
+
+ handler := sc.handler.ServeHTTP
+ if f.Truncated {
+ // Their header list was too long. Send a 431 error.
+ handler = handleHeaderListTooLong
+ } else if err := checkValidHTTP2Request(req); err != nil {
+ handler = new400Handler(err)
+ }
+
+ go sc.runHandler(rw, req, handler)
+ return nil
+}
+
+func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
+ sc := st.sc
+ sc.serveG.check()
+ if st.gotTrailerHeader {
+ return ConnectionError(ErrCodeProtocol)
+ }
+ st.gotTrailerHeader = true
+ if !f.StreamEnded() {
+ return StreamError{st.id, ErrCodeProtocol}
+ }
+
+ if len(f.PseudoFields()) > 0 {
+ return StreamError{st.id, ErrCodeProtocol}
+ }
+ if st.trailer != nil {
+ for _, hf := range f.RegularFields() {
+ key := sc.canonicalHeader(hf.Name)
+ if !ValidTrailerHeader(key) {
+ // TODO: send more details to the peer somehow. But http2 has
+ // no way to send debug data at a stream level. Discuss with
+ // HTTP folk.
+ return StreamError{st.id, ErrCodeProtocol}
+ }
+ st.trailer[key] = append(st.trailer[key], hf.Value)
+ }
+ }
+ st.endStream()
+ return nil
+}
+
+func (sc *serverConn) processPriority(f *PriorityFrame) error {
+ adjustStreamPriority(sc.streams, f.StreamID, f.PriorityParam)
+ return nil
+}
+
+func adjustStreamPriority(streams map[uint32]*stream, streamID uint32, priority PriorityParam) {
+ st, ok := streams[streamID]
+ if !ok {
+ // TODO: not quite correct (this streamID might
+ // already exist in the dep tree, but be closed), but
+ // close enough for now.
+ return
+ }
+ st.weight = priority.Weight
+ parent := streams[priority.StreamDep] // might be nil
+ if parent == st {
+ // if client tries to set this stream to be the parent of itself
+ // ignore and keep going
+ return
+ }
+
+ // section 5.3.3: If a stream is made dependent on one of its
+ // own dependencies, the formerly dependent stream is first
+ // moved to be dependent on the reprioritized stream's previous
+ // parent. The moved dependency retains its weight.
+ for piter := parent; piter != nil; piter = piter.parent {
+ if piter == st {
+ parent.parent = st.parent
+ break
+ }
+ }
+ st.parent = parent
+ if priority.Exclusive && (st.parent != nil || priority.StreamDep == 0) {
+ for _, openStream := range streams {
+ if openStream != st && openStream.parent == st.parent {
+ openStream.parent = st
+ }
+ }
+ }
+}
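+
+// A sketch of the section 5.3.3 case handled above, with hypothetical
+// stream IDs: if stream 3 depends on stream 1, and the client then
+// makes stream 1 depend on stream 3, stream 3 is first re-parented
+// onto stream 1's previous parent before stream 1 is attached to it:
+//
+//	before: 0 <- 1 <- 3        after: 0 <- 3 <- 1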
+
+func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
+ sc.serveG.check()
+
+ method := f.PseudoValue("method")
+ path := f.PseudoValue("path")
+ scheme := f.PseudoValue("scheme")
+ authority := f.PseudoValue("authority")
+
+ isConnect := method == "CONNECT"
+ if isConnect {
+ if path != "" || scheme != "" || authority == "" {
+ return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
+ }
+ } else if method == "" || path == "" ||
+ (scheme != "https" && scheme != "http") {
+ // See 8.1.2.6 Malformed Requests and Responses:
+ //
+ // "Malformed requests or responses that are detected
+ // MUST be treated as a stream error (Section 5.4.2)
+ // of type PROTOCOL_ERROR."
+ //
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // "All HTTP/2 requests MUST include exactly one valid
+ // value for the :method, :scheme, and :path
+ // pseudo-header fields"
+ return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
+ }
+
+ bodyOpen := !f.StreamEnded()
+ if method == "HEAD" && bodyOpen {
+ // HEAD requests can't have bodies
+ return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
+ }
+ var tlsState *tls.ConnectionState // nil if not scheme https
+
+ if scheme == "https" {
+ tlsState = sc.tlsState
+ }
+
+ header := make(http.Header)
+ for _, hf := range f.RegularFields() {
+ header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+ }
+
+ if authority == "" {
+ authority = header.Get("Host")
+ }
+ needsContinue := header.Get("Expect") == "100-continue"
+ if needsContinue {
+ header.Del("Expect")
+ }
+ // Merge Cookie headers into one "; "-delimited value.
+ if cookies := header["Cookie"]; len(cookies) > 1 {
+ header.Set("Cookie", strings.Join(cookies, "; "))
+ }
+
+ // Setup Trailers
+ var trailer http.Header
+ for _, v := range header["Trailer"] {
+ for _, key := range strings.Split(v, ",") {
+ key = http.CanonicalHeaderKey(strings.TrimSpace(key))
+ switch key {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ // Bogus. (copy of http1 rules)
+ // Ignore.
+ default:
+ if trailer == nil {
+ trailer = make(http.Header)
+ }
+ trailer[key] = nil
+ }
+ }
+ }
+ delete(header, "Trailer")
+
+ body := &requestBody{
+ conn: sc,
+ stream: st,
+ needsContinue: needsContinue,
+ }
+ var url_ *url.URL
+ var requestURI string
+ if isConnect {
+ url_ = &url.URL{Host: authority}
+ requestURI = authority // mimic HTTP/1 server behavior
+ } else {
+ var err error
+ url_, err = url.ParseRequestURI(path)
+ if err != nil {
+ return nil, nil, StreamError{f.StreamID, ErrCodeProtocol}
+ }
+ requestURI = path
+ }
+ req := &http.Request{
+ Method: method,
+ URL: url_,
+ RemoteAddr: sc.remoteAddrStr,
+ Header: header,
+ RequestURI: requestURI,
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ ProtoMinor: 0,
+ TLS: tlsState,
+ Host: authority,
+ Body: body,
+ Trailer: trailer,
+ }
+ req = requestWithContext(req, st.ctx)
+ if bodyOpen {
+ // Disabled, per golang.org/issue/14960:
+ // st.reqBuf = sc.getRequestBodyBuf()
+ // TODO: remove this 64k of garbage per request (again, but without a data race):
+ buf := make([]byte, initialWindowSize)
+
+ body.pipe = &pipe{
+ b: &fixedBuffer{buf: buf},
+ }
+
+ if vv, ok := header["Content-Length"]; ok {
+ req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64)
+ } else {
+ req.ContentLength = -1
+ }
+ }
+
+ rws := responseWriterStatePool.Get().(*responseWriterState)
+ bwSave := rws.bw
+ *rws = responseWriterState{} // zero all the fields
+ rws.conn = sc
+ rws.bw = bwSave
+ rws.bw.Reset(chunkWriter{rws})
+ rws.stream = st
+ rws.req = req
+ rws.body = body
+
+ rw := &responseWriter{rws: rws}
+ return rw, req, nil
+}
+
+func (sc *serverConn) getRequestBodyBuf() []byte {
+ sc.serveG.check()
+ if buf := sc.freeRequestBodyBuf; buf != nil {
+ sc.freeRequestBodyBuf = nil
+ return buf
+ }
+ return make([]byte, initialWindowSize)
+}
+
+// Run on its own goroutine.
+func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
+ didPanic := true
+ defer func() {
+ rw.rws.stream.cancelCtx()
+ if didPanic {
+ e := recover()
+ // Same as net/http:
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ sc.writeFrameFromHandler(frameWriteMsg{
+ write: handlerPanicRST{rw.rws.stream.id},
+ stream: rw.rws.stream,
+ })
+ sc.logf("http2: panic serving %v: %v\n%s", sc.conn.RemoteAddr(), e, buf)
+ return
+ }
+ rw.handlerDone()
+ }()
+ handler(rw, req)
+ didPanic = false
+}
+
+func handleHeaderListTooLong(w http.ResponseWriter, r *http.Request) {
+ // 10.5.1 Limits on Header Block Size:
+ // .. "A server that receives a larger header block than it is
+ // willing to handle can send an HTTP 431 (Request Header Fields Too
+ // Large) status code"
+ const statusRequestHeaderFieldsTooLarge = 431 // only in Go 1.6+
+ w.WriteHeader(statusRequestHeaderFieldsTooLarge)
+ io.WriteString(w, "<h1>HTTP Error 431</h1><p>Request Header Field(s) Too Large</p>")
+}
+
+// called from handler goroutines.
+// headerData.h may be nil.
+func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
+ sc.serveG.checkNotOn() // NOT on
+ var errc chan error
+ if headerData.h != nil {
+ // If there's a header map (which we don't own), we have to block on
+ // this frame being written out, so that an http.Flush mid-handler
+ // writes out the correct value of keys before a handler later
+ // potentially mutates it.
+ errc = errChanPool.Get().(chan error)
+ }
+ if err := sc.writeFrameFromHandler(frameWriteMsg{
+ write: headerData,
+ stream: st,
+ done: errc,
+ }); err != nil {
+ return err
+ }
+ if errc != nil {
+ select {
+ case err := <-errc:
+ errChanPool.Put(errc)
+ return err
+ case <-sc.doneServing:
+ return errClientDisconnected
+ case <-st.cw:
+ return errStreamClosed
+ }
+ }
+ return nil
+}
+
+// called from handler goroutines.
+func (sc *serverConn) write100ContinueHeaders(st *stream) {
+ sc.writeFrameFromHandler(frameWriteMsg{
+ write: write100ContinueHeadersFrame{st.id},
+ stream: st,
+ })
+}
+
+// A bodyReadMsg tells the server loop that the http.Handler read n
+// bytes of the DATA from the client on the given stream.
+type bodyReadMsg struct {
+ st *stream
+ n int
+}
+
+// called from handler goroutines.
+// Notes that the handler for the given stream ID read n bytes of its body
+// and schedules flow control tokens to be sent.
+func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int) {
+ sc.serveG.checkNotOn() // NOT on
+ select {
+ case sc.bodyReadCh <- bodyReadMsg{st, n}:
+ case <-sc.doneServing:
+ }
+}
+
+func (sc *serverConn) noteBodyRead(st *stream, n int) {
+ sc.serveG.check()
+ sc.sendWindowUpdate(nil, n) // conn-level
+ if st.state != stateHalfClosedRemote && st.state != stateClosed {
+ // Don't send this WINDOW_UPDATE if the stream is closed
+ // remotely.
+ sc.sendWindowUpdate(st, n)
+ }
+}
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate(st *stream, n int) {
+ sc.serveG.check()
+ // "The legal range for the increment to the flow control
+ // window is 1 to 2^31-1 (2,147,483,647) octets."
+ // A Go Read call on 64-bit machines could in theory perform
+ // a larger Read than this. Very unlikely, but we handle it here
+ // rather than elsewhere for now.
+ const maxUint31 = 1<<31 - 1
+ for n >= maxUint31 {
+ sc.sendWindowUpdate32(st, maxUint31)
+ n -= maxUint31
+ }
+ sc.sendWindowUpdate32(st, int32(n))
+}
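+
+// E.g. (hypothetical): a single 3 GiB Read would be split here into a
+// WINDOW_UPDATE of 2^31-1 octets followed by one of
+// 3<<30 - (2^31-1) = 1073741825 octets.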
+
+// st may be nil for conn-level
+func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
+ sc.serveG.check()
+ if n == 0 {
+ return
+ }
+ if n < 0 {
+ panic("negative update")
+ }
+ var streamID uint32
+ if st != nil {
+ streamID = st.id
+ }
+ sc.writeFrame(frameWriteMsg{
+ write: writeWindowUpdate{streamID: streamID, n: uint32(n)},
+ stream: st,
+ })
+ var ok bool
+ if st == nil {
+ ok = sc.inflow.add(n)
+ } else {
+ ok = st.inflow.add(n)
+ }
+ if !ok {
+ panic("internal error; sent too many window updates without decrements?")
+ }
+}
+
+type requestBody struct {
+ stream *stream
+ conn *serverConn
+ closed bool
+ pipe *pipe // non-nil if we have a HTTP entity message body
+ needsContinue bool // need to send a 100-continue
+}
+
+func (b *requestBody) Close() error {
+ if b.pipe != nil {
+ b.pipe.BreakWithError(errClosedBody)
+ }
+ b.closed = true
+ return nil
+}
+
+func (b *requestBody) Read(p []byte) (n int, err error) {
+ if b.needsContinue {
+ b.needsContinue = false
+ b.conn.write100ContinueHeaders(b.stream)
+ }
+ if b.pipe == nil {
+ return 0, io.EOF
+ }
+ n, err = b.pipe.Read(p)
+ if n > 0 {
+ b.conn.noteBodyReadFromHandler(b.stream, n)
+ }
+ return
+}
+
+// responseWriter is the http.ResponseWriter implementation. It's
+// intentionally small (1 pointer wide) to minimize garbage. The
+// responseWriterState pointer inside is zeroed at the end of a
+// request (in handlerDone) and calls on the responseWriter thereafter
+// simply crash (caller's mistake), but the much larger responseWriterState
+// and buffers are reused between multiple requests.
+type responseWriter struct {
+ rws *responseWriterState
+}
+
+// Optional http.ResponseWriter interfaces implemented.
+var (
+ _ http.CloseNotifier = (*responseWriter)(nil)
+ _ http.Flusher = (*responseWriter)(nil)
+ _ stringWriter = (*responseWriter)(nil)
+)
+
+type responseWriterState struct {
+ // immutable within a request:
+ stream *stream
+ req *http.Request
+ body *requestBody // to close at end of request, if DATA frames didn't
+ conn *serverConn
+
+ // TODO: adjust buffer writing sizes based on server config, frame size updates from peer, etc
+ bw *bufio.Writer // writing to a chunkWriter{this *responseWriterState}
+
+ // mutated by http.Handler goroutine:
+ handlerHeader http.Header // nil until called
+ snapHeader http.Header // snapshot of handlerHeader at WriteHeader time
+ trailers []string // set in writeChunk
+ status int // status code passed to WriteHeader
+ wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet.
+ sentHeader bool // have we sent the header frame?
+ handlerDone bool // handler has finished
+
+ sentContentLen int64 // non-zero if handler set a Content-Length header
+ wroteBytes int64
+
+ closeNotifierMu sync.Mutex // guards closeNotifierCh
+ closeNotifierCh chan bool // nil until first used
+}
+
+type chunkWriter struct{ rws *responseWriterState }
+
+func (cw chunkWriter) Write(p []byte) (n int, err error) { return cw.rws.writeChunk(p) }
+
+func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != 0 }
+
+// declareTrailer is called for each Trailer header when the
+// response header is written. It notes that a header will need to be
+// written in the trailers at the end of the response.
+func (rws *responseWriterState) declareTrailer(k string) {
+ k = http.CanonicalHeaderKey(k)
+ if !ValidTrailerHeader(k) {
+ // Forbidden by RFC 2616 14.40.
+ rws.conn.logf("ignoring invalid trailer %q", k)
+ return
+ }
+ if !strSliceContains(rws.trailers, k) {
+ rws.trailers = append(rws.trailers, k)
+ }
+}
+
+// writeChunk writes chunks from the bufio.Writer. But because
+// bufio.Writer may bypass its chunking, sometimes p may be
+// arbitrarily large.
+//
+// writeChunk is also responsible (on the first chunk) for sending the
+// HEADER response.
+func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
+ if !rws.wroteHeader {
+ rws.writeHeader(200)
+ }
+
+ isHeadResp := rws.req.Method == "HEAD"
+ if !rws.sentHeader {
+ rws.sentHeader = true
+ var ctype, clen string
+ if clen = rws.snapHeader.Get("Content-Length"); clen != "" {
+ rws.snapHeader.Del("Content-Length")
+ clen64, err := strconv.ParseInt(clen, 10, 64)
+ if err == nil && clen64 >= 0 {
+ rws.sentContentLen = clen64
+ } else {
+ clen = ""
+ }
+ }
+ if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) {
+ clen = strconv.Itoa(len(p))
+ }
+ _, hasContentType := rws.snapHeader["Content-Type"]
+ if !hasContentType && bodyAllowedForStatus(rws.status) {
+ ctype = http.DetectContentType(p)
+ }
+ var date string
+ if _, ok := rws.snapHeader["Date"]; !ok {
+ // TODO(bradfitz): be faster here, like net/http? measure.
+ date = time.Now().UTC().Format(http.TimeFormat)
+ }
+
+ for _, v := range rws.snapHeader["Trailer"] {
+ foreachHeaderElement(v, rws.declareTrailer)
+ }
+
+ endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
+ err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ streamID: rws.stream.id,
+ httpResCode: rws.status,
+ h: rws.snapHeader,
+ endStream: endStream,
+ contentType: ctype,
+ contentLength: clen,
+ date: date,
+ })
+ if err != nil {
+ return 0, err
+ }
+ if endStream {
+ return 0, nil
+ }
+ }
+ if isHeadResp {
+ return len(p), nil
+ }
+ if len(p) == 0 && !rws.handlerDone {
+ return 0, nil
+ }
+
+ if rws.handlerDone {
+ rws.promoteUndeclaredTrailers()
+ }
+
+ endStream := rws.handlerDone && !rws.hasTrailers()
+ if len(p) > 0 || endStream {
+ // only send a 0 byte DATA frame if we're ending the stream.
+ if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil {
+ return 0, err
+ }
+ }
+
+ if rws.handlerDone && rws.hasTrailers() {
+ err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
+ streamID: rws.stream.id,
+ h: rws.handlerHeader,
+ trailers: rws.trailers,
+ endStream: true,
+ })
+ return len(p), err
+ }
+ return len(p), nil
+}
+
+// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys
+// that, if present, signals that the map entry is actually for
+// the response trailers, and not the response headers. The prefix
+// is stripped after the ServeHTTP call finishes and the values are
+// sent in the trailers.
+//
+// This mechanism is intended only for trailers that are not known
+// prior to the headers being written. If the set of trailers is fixed
+// or known before the header is written, the normal Go trailers mechanism
+// is preferred:
+// https://golang.org/pkg/net/http/#ResponseWriter
+// https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+const TrailerPrefix = "Trailer:"
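+
+// A minimal sketch of the prefix mechanism (hypothetical handler, not
+// part of this package's API):
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		// Promoted to the "Grpc-Status" trailer after ServeHTTP returns:
+//		w.Header().Set("Trailer:Grpc-Status", "0")
+//		io.WriteString(w, "body")
+//	}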
+
+// promoteUndeclaredTrailers permits http.Handlers to set trailers
+// after the header has already been flushed. Because the Go
+// ResponseWriter interface has no way to set Trailers (only the
+// Header), and because we didn't want to expand the ResponseWriter
+// interface, and because nobody used trailers, and because RFC 2616
+// says you SHOULD (but not must) predeclare any trailers in the
+// header, the official ResponseWriter rules said trailers in Go must
+// be predeclared, and then we reuse the same ResponseWriter.Header()
+// map to mean both Headers and Trailers. When it's time to write the
+// Trailers, we pick out the fields of Headers that were declared as
+// trailers. That worked for a while, until we found the first major
+// user of Trailers in the wild: gRPC (using them only over http2),
+// and gRPC libraries permit setting trailers mid-stream without
+// predeclaring them. So: change of plans. We still permit the old
+// way, but we also permit this hack: if a Header() key begins with
+// "Trailer:", the suffix of that key is a Trailer. Because ':' is an
+// invalid token byte anyway, there is no ambiguity. (And it's already
+// filtered out.) It's mildly hacky, but not terrible.
+//
+// This method runs after the Handler is done and promotes any Header
+// fields to be trailers.
+func (rws *responseWriterState) promoteUndeclaredTrailers() {
+ for k, vv := range rws.handlerHeader {
+ if !strings.HasPrefix(k, TrailerPrefix) {
+ continue
+ }
+ trailerKey := strings.TrimPrefix(k, TrailerPrefix)
+ rws.declareTrailer(trailerKey)
+ rws.handlerHeader[http.CanonicalHeaderKey(trailerKey)] = vv
+ }
+
+ if len(rws.trailers) > 1 {
+ sorter := sorterPool.Get().(*sorter)
+ sorter.SortStrings(rws.trailers)
+ sorterPool.Put(sorter)
+ }
+}
+
+func (w *responseWriter) Flush() {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.bw.Buffered() > 0 {
+ if err := rws.bw.Flush(); err != nil {
+ // Ignore the error. The frame writer already knows.
+ return
+ }
+ } else {
+ // The bufio.Writer won't call chunkWriter.Write
+ // (writeChunk) with zero bytes, so we have to do it
+ // ourselves to force the HTTP response header and/or
+ // final DATA frame (with END_STREAM) to be sent.
+ rws.writeChunk(nil)
+ }
+}
+
+func (w *responseWriter) CloseNotify() <-chan bool {
+ rws := w.rws
+ if rws == nil {
+ panic("CloseNotify called after Handler finished")
+ }
+ rws.closeNotifierMu.Lock()
+ ch := rws.closeNotifierCh
+ if ch == nil {
+ ch = make(chan bool, 1)
+ rws.closeNotifierCh = ch
+ go func() {
+ rws.stream.cw.Wait() // wait for close
+ ch <- true
+ }()
+ }
+ rws.closeNotifierMu.Unlock()
+ return ch
+}
+
+func (w *responseWriter) Header() http.Header {
+ rws := w.rws
+ if rws == nil {
+ panic("Header called after Handler finished")
+ }
+ if rws.handlerHeader == nil {
+ rws.handlerHeader = make(http.Header)
+ }
+ return rws.handlerHeader
+}
+
+func (w *responseWriter) WriteHeader(code int) {
+ rws := w.rws
+ if rws == nil {
+ panic("WriteHeader called after Handler finished")
+ }
+ rws.writeHeader(code)
+}
+
+func (rws *responseWriterState) writeHeader(code int) {
+ if !rws.wroteHeader {
+ rws.wroteHeader = true
+ rws.status = code
+ if len(rws.handlerHeader) > 0 {
+ rws.snapHeader = cloneHeader(rws.handlerHeader)
+ }
+ }
+}
+
+func cloneHeader(h http.Header) http.Header {
+ h2 := make(http.Header, len(h))
+ for k, vv := range h {
+ vv2 := make([]string, len(vv))
+ copy(vv2, vv)
+ h2[k] = vv2
+ }
+ return h2
+}
+
+// The Life Of A Write is like this:
+//
+// * Handler calls w.Write or w.WriteString ->
+// * -> rws.bw (*bufio.Writer) ->
+// * (Handler might call Flush)
+// * -> chunkWriter{rws}
+// * -> responseWriterState.writeChunk(p []byte) (most of the magic; see comment there)
+func (w *responseWriter) Write(p []byte) (n int, err error) {
+ return w.write(len(p), p, "")
+}
+
+func (w *responseWriter) WriteString(s string) (n int, err error) {
+ return w.write(len(s), nil, s)
+}
+
+// At most one of dataB or dataS is non-empty.
+func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, err error) {
+ rws := w.rws
+ if rws == nil {
+ panic("Write called after Handler finished")
+ }
+ if !rws.wroteHeader {
+ w.WriteHeader(200)
+ }
+ if !bodyAllowedForStatus(rws.status) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ rws.wroteBytes += int64(len(dataB)) + int64(len(dataS)) // only one can be set
+ if rws.sentContentLen != 0 && rws.wroteBytes > rws.sentContentLen {
+ // TODO: send a RST_STREAM
+ return 0, errors.New("http2: handler wrote more than declared Content-Length")
+ }
+
+ if dataB != nil {
+ return rws.bw.Write(dataB)
+ } else {
+ return rws.bw.WriteString(dataS)
+ }
+}
+
+func (w *responseWriter) handlerDone() {
+ rws := w.rws
+ rws.handlerDone = true
+ w.Flush()
+ w.rws = nil
+ responseWriterStatePool.Put(rws)
+}
+
+// foreachHeaderElement splits v according to the "#rule" construction
+// in RFC 2616 section 2.1 and calls fn for each non-empty element.
+func foreachHeaderElement(v string, fn func(string)) {
+ v = textproto.TrimString(v)
+ if v == "" {
+ return
+ }
+ if !strings.Contains(v, ",") {
+ fn(v)
+ return
+ }
+ for _, f := range strings.Split(v, ",") {
+ if f = textproto.TrimString(f); f != "" {
+ fn(f)
+ }
+ }
+}
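+
+// For example, foreachHeaderElement("gzip,, chunked ", fn) calls fn
+// with "gzip" and then "chunked"; empty elements and surrounding
+// whitespace are dropped.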
+
+// From http://httpwg.org/specs/rfc7540.html#rfc.section.8.1.2.2
+var connHeaders = []string{
+ "Connection",
+ "Keep-Alive",
+ "Proxy-Connection",
+ "Transfer-Encoding",
+ "Upgrade",
+}
+
+// checkValidHTTP2Request checks whether req is a valid HTTP/2 request,
+// per RFC 7540 Section 8.1.2.2.
+// The returned error is reported to users.
+func checkValidHTTP2Request(req *http.Request) error {
+ for _, h := range connHeaders {
+ if _, ok := req.Header[h]; ok {
+ return fmt.Errorf("request header %q is not valid in HTTP/2", h)
+ }
+ }
+ te := req.Header["Te"]
+ if len(te) > 0 && (len(te) > 1 || (te[0] != "trailers" && te[0] != "")) {
+ return errors.New(`request header "TE" may only be "trailers" in HTTP/2`)
+ }
+ return nil
+}
+
+func new400Handler(err error) http.HandlerFunc {
+ return func(w http.ResponseWriter, r *http.Request) {
+ http.Error(w, err.Error(), http.StatusBadRequest)
+ }
+}
+
+// ValidTrailerHeader reports whether name is a valid header field name to appear
+// in trailers.
+// See: http://tools.ietf.org/html/rfc7230#section-4.1.2
+func ValidTrailerHeader(name string) bool {
+ name = http.CanonicalHeaderKey(name)
+ if strings.HasPrefix(name, "If-") || badTrailer[name] {
+ return false
+ }
+ return true
+}
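+
+// E.g. ValidTrailerHeader("grpc-status") is true (canonicalized to
+// "Grpc-Status"), while ValidTrailerHeader("If-Match") and
+// ValidTrailerHeader("content-length") are false.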
+
+var badTrailer = map[string]bool{
+ "Authorization": true,
+ "Cache-Control": true,
+ "Connection": true,
+ "Content-Encoding": true,
+ "Content-Length": true,
+ "Content-Range": true,
+ "Content-Type": true,
+ "Expect": true,
+ "Host": true,
+ "Keep-Alive": true,
+ "Max-Forwards": true,
+ "Pragma": true,
+ "Proxy-Authenticate": true,
+ "Proxy-Authorization": true,
+ "Proxy-Connection": true,
+ "Range": true,
+ "Realm": true,
+ "Te": true,
+ "Trailer": true,
+ "Transfer-Encoding": true,
+ "Www-Authenticate": true,
+}
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
new file mode 100644
index 0000000..2ae7437
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -0,0 +1,1816 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Transport code.
+
+package http2
+
+import (
+ "bufio"
+ "bytes"
+ "compress/gzip"
+ "crypto/tls"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/lex/httplex"
+)
+
+const (
+ // transportDefaultConnFlow is how many connection-level flow control
+ // tokens we give the server at start-up, past the default 64k.
+ transportDefaultConnFlow = 1 << 30
+
+ // transportDefaultStreamFlow is how many stream-level flow
+ // control tokens we announce to the peer, and how many bytes
+ // we buffer per stream.
+ transportDefaultStreamFlow = 4 << 20
+
+ // transportDefaultStreamMinRefresh is the minimum number of bytes we'll send
+ // a stream-level WINDOW_UPDATE for at a time.
+ transportDefaultStreamMinRefresh = 4 << 10
+
+ defaultUserAgent = "Go-http-client/2.0"
+)
+
+// Transport is an HTTP/2 Transport.
+//
+// A Transport internally caches connections to servers. It is safe
+// for concurrent use by multiple goroutines.
+type Transport struct {
+ // DialTLS specifies an optional dial function for creating
+ // TLS connections for requests.
+ //
+ // If DialTLS is nil, tls.Dial is used.
+ //
+ // If the returned net.Conn has a ConnectionState method like tls.Conn,
+ // it will be used to set http.Response.TLS.
+ DialTLS func(network, addr string, cfg *tls.Config) (net.Conn, error)
+
+ // TLSClientConfig specifies the TLS configuration to use with
+ // tls.Client. If nil, the default configuration is used.
+ TLSClientConfig *tls.Config
+
+ // ConnPool optionally specifies an alternate connection pool to use.
+ // If nil, the default is used.
+ ConnPool ClientConnPool
+
+ // DisableCompression, if true, prevents the Transport from
+ // requesting compression with an "Accept-Encoding: gzip"
+ // request header when the Request contains no existing
+ // Accept-Encoding value. If the Transport requests gzip on
+ // its own and gets a gzipped response, it's transparently
+ // decoded in the Response.Body. However, if the user
+ // explicitly requested gzip it is not automatically
+ // uncompressed.
+ DisableCompression bool
+
+ // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to
+ // send in the initial settings frame. It is how many bytes
+ // of response headers are allowed. Unlike the http2 spec, zero here
+ // means to use a default limit (currently 10MB). If you actually
+ // want to advertise an unlimited value to the peer, Transport
+ // interprets the highest possible value here (0xffffffff or 1<<32-1)
+ // to mean no limit.
+ MaxHeaderListSize uint32
+
+ // t1, if non-nil, is the standard library Transport using
+ // this transport. Its settings are used (but not its
+ // RoundTrip method, etc).
+ t1 *http.Transport
+
+ connPoolOnce sync.Once
+ connPoolOrDef ClientConnPool // non-nil version of ConnPool
+}
+
+func (t *Transport) maxHeaderListSize() uint32 {
+ if t.MaxHeaderListSize == 0 {
+ return 10 << 20
+ }
+ if t.MaxHeaderListSize == 0xffffffff {
+ return 0
+ }
+ return t.MaxHeaderListSize
+}
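+
+// E.g. a Transport with MaxHeaderListSize == 0 advertises the default
+// limit of 10 << 20 bytes (10MB), while 0xffffffff is reported here as
+// 0, meaning no limit is enforced.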
+
+func (t *Transport) disableCompression() bool {
+ return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
+}
+
+var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6")
+
+// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
+// It requires Go 1.6 or later and returns an error if the net/http package is too old
+// or if t1 has already been HTTP/2-enabled.
+func ConfigureTransport(t1 *http.Transport) error {
+ _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go
+ return err
+}
+
+func (t *Transport) connPool() ClientConnPool {
+ t.connPoolOnce.Do(t.initConnPool)
+ return t.connPoolOrDef
+}
+
+func (t *Transport) initConnPool() {
+ if t.ConnPool != nil {
+ t.connPoolOrDef = t.ConnPool
+ } else {
+ t.connPoolOrDef = &clientConnPool{t: t}
+ }
+}
+
+// ClientConn is the state of a single HTTP/2 client connection to an
+// HTTP/2 server.
+type ClientConn struct {
+ t *Transport
+ tconn net.Conn // usually *tls.Conn, except specialized impls
+ tlsState *tls.ConnectionState // nil only for specialized impls
+
+ // readLoop goroutine fields:
+ readerDone chan struct{} // closed on error
+ readerErr error // set before readerDone is closed
+
+ mu sync.Mutex // guards following
+ cond *sync.Cond // hold mu; broadcast on flow/closed changes
+ flow flow // our conn-level flow control quota (cs.flow is per stream)
+ inflow flow // peer's conn-level flow control
+ closed bool
+ goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
+ streams map[uint32]*clientStream // client-initiated
+ nextStreamID uint32
+ bw *bufio.Writer
+ br *bufio.Reader
+ fr *Framer
+ lastActive time.Time
+
+ // Settings from peer:
+ maxFrameSize uint32
+ maxConcurrentStreams uint32
+ initialWindowSize uint32
+ hbuf bytes.Buffer // HPACK encoder writes into this
+ henc *hpack.Encoder
+ freeBuf [][]byte
+
+ wmu sync.Mutex // held while writing; acquire AFTER mu if holding both
+ werr error // first write error that has occurred
+}
+
+// clientStream is the state for a single HTTP/2 stream. One of these
+// is created for each Transport.RoundTrip call.
+type clientStream struct {
+ cc *ClientConn
+ req *http.Request
+ trace *clientTrace // or nil
+ ID uint32
+ resc chan resAndError
+ bufPipe pipe // buffered pipe with the flow-controlled response payload
+ requestedGzip bool
+ on100 func() // optional code to run if we get a 100 continue response
+
+ flow flow // guarded by cc.mu
+ inflow flow // guarded by cc.mu
+ bytesRemain int64 // -1 means unknown; owned by transportResponseBody.Read
+ readErr error // sticky read error; owned by transportResponseBody.Read
+ stopReqBody error // if non-nil, stop writing req body; guarded by cc.mu
+
+ peerReset chan struct{} // closed on peer reset
+ resetErr error // populated before peerReset is closed
+
+ done chan struct{} // closed when stream removed from cc.streams map; close calls guarded by cc.mu
+
+ // owned by clientConnReadLoop:
+ pastHeaders bool // got first MetaHeadersFrame (actual headers)
+ pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+
+ trailer http.Header // accumulated trailers
+ resTrailer *http.Header // client's Response.Trailer
+}
+
+// awaitRequestCancel runs in its own goroutine and waits for the user
+// to cancel a RoundTrip request, its context to expire, or for the
+// request to be done (any way it might be removed from the cc.streams
+// map: peer reset, successful completion, TCP connection breakage,
+// etc)
+func (cs *clientStream) awaitRequestCancel(req *http.Request) {
+ ctx := reqContext(req)
+ if req.Cancel == nil && ctx.Done() == nil {
+ return
+ }
+ select {
+ case <-req.Cancel:
+ cs.bufPipe.CloseWithError(errRequestCanceled)
+ cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ case <-ctx.Done():
+ cs.bufPipe.CloseWithError(ctx.Err())
+ cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ case <-cs.done:
+ }
+}
+
+// checkResetOrDone reports any error sent in a RST_STREAM frame by the
+// server, or errStreamClosed if the stream is complete.
+func (cs *clientStream) checkResetOrDone() error {
+ select {
+ case <-cs.peerReset:
+ return cs.resetErr
+ case <-cs.done:
+ return errStreamClosed
+ default:
+ return nil
+ }
+}
+
+func (cs *clientStream) abortRequestBodyWrite(err error) {
+ if err == nil {
+ panic("nil error")
+ }
+ cc := cs.cc
+ cc.mu.Lock()
+ cs.stopReqBody = err
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+}
+
+type stickyErrWriter struct {
+ w io.Writer
+ err *error
+}
+
+func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
+ if *sew.err != nil {
+ return 0, *sew.err
+ }
+ n, err = sew.w.Write(p)
+ *sew.err = err
+ return
+}
+
+var ErrNoCachedConn = errors.New("http2: no cached connection was available")
+
+// RoundTripOpt are options for the Transport.RoundTripOpt method.
+type RoundTripOpt struct {
+ // OnlyCachedConn controls whether RoundTripOpt may
+ // create a new TCP connection. If set true and
+ // no cached connection is available, RoundTripOpt
+ // will return ErrNoCachedConn.
+ OnlyCachedConn bool
+}
+
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ return t.RoundTripOpt(req, RoundTripOpt{})
+}
+
+// authorityAddr converts a given authority (a host/IP, or host:port /
+// ip:port) to a host:port. The port 443 is added if needed.
+func authorityAddr(authority string) (addr string) {
+ if _, _, err := net.SplitHostPort(authority); err == nil {
+ return authority
+ }
+ return net.JoinHostPort(authority, "443")
+}
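+
+// E.g. authorityAddr("example.com") returns "example.com:443", while
+// authorityAddr("example.com:8443") is returned unchanged.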
+
+// RoundTripOpt is like RoundTrip, but takes options.
+func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
+ if req.URL.Scheme != "https" {
+ return nil, errors.New("http2: unsupported scheme")
+ }
+
+ addr := authorityAddr(req.URL.Host)
+ for {
+ cc, err := t.connPool().GetClientConn(req, addr)
+ if err != nil {
+ t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
+ return nil, err
+ }
+ traceGotConn(req, cc)
+ res, err := cc.RoundTrip(req)
+ if shouldRetryRequest(req, err) {
+ continue
+ }
+ if err != nil {
+ t.vlogf("RoundTrip failure: %v", err)
+ return nil, err
+ }
+ return res, nil
+ }
+}
+
+// CloseIdleConnections closes any connections which were previously
+// connected from previous requests but are now sitting idle.
+// It does not interrupt any connections currently in use.
+func (t *Transport) CloseIdleConnections() {
+ if cp, ok := t.connPool().(clientConnPoolIdleCloser); ok {
+ cp.closeIdleConnections()
+ }
+}
+
+var (
+ errClientConnClosed = errors.New("http2: client conn is closed")
+ errClientConnUnusable = errors.New("http2: client conn not usable")
+)
+
+func shouldRetryRequest(req *http.Request, err error) bool {
+ // TODO: retry GET requests (no bodies) more aggressively, if shutdown
+ // before response.
+ return err == errClientConnUnusable
+}
+
+func (t *Transport) dialClientConn(addr string) (*ClientConn, error) {
+ host, _, err := net.SplitHostPort(addr)
+ if err != nil {
+ return nil, err
+ }
+ tconn, err := t.dialTLS()("tcp", addr, t.newTLSConfig(host))
+ if err != nil {
+ return nil, err
+ }
+ return t.NewClientConn(tconn)
+}
+
+func (t *Transport) newTLSConfig(host string) *tls.Config {
+ cfg := new(tls.Config)
+ if t.TLSClientConfig != nil {
+ *cfg = *t.TLSClientConfig
+ }
+ if !strSliceContains(cfg.NextProtos, NextProtoTLS) {
+ cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...)
+ }
+ if cfg.ServerName == "" {
+ cfg.ServerName = host
+ }
+ return cfg
+}
+
+func (t *Transport) dialTLS() func(string, string, *tls.Config) (net.Conn, error) {
+ if t.DialTLS != nil {
+ return t.DialTLS
+ }
+ return t.dialTLSDefault
+}
+
+func (t *Transport) dialTLSDefault(network, addr string, cfg *tls.Config) (net.Conn, error) {
+ cn, err := tls.Dial(network, addr, cfg)
+ if err != nil {
+ return nil, err
+ }
+ if err := cn.Handshake(); err != nil {
+ return nil, err
+ }
+ if !cfg.InsecureSkipVerify {
+ if err := cn.VerifyHostname(cfg.ServerName); err != nil {
+ return nil, err
+ }
+ }
+ state := cn.ConnectionState()
+ if p := state.NegotiatedProtocol; p != NextProtoTLS {
+ return nil, fmt.Errorf("http2: unexpected ALPN protocol %q; want %q", p, NextProtoTLS)
+ }
+ if !state.NegotiatedProtocolIsMutual {
+ return nil, errors.New("http2: could not negotiate protocol mutually")
+ }
+ return cn, nil
+}
+
+// disableKeepAlives reports whether connections should be closed as
+// soon as possible after handling the first request.
+func (t *Transport) disableKeepAlives() bool {
+ return t.t1 != nil && t.t1.DisableKeepAlives
+}
+
+func (t *Transport) expectContinueTimeout() time.Duration {
+ if t.t1 == nil {
+ return 0
+ }
+ return transportExpectContinueTimeout(t.t1)
+}
+
+func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
+ if VerboseLogs {
+ t.vlogf("http2: Transport creating client conn to %v", c.RemoteAddr())
+ }
+ if _, err := c.Write(clientPreface); err != nil {
+ t.vlogf("client preface write error: %v", err)
+ return nil, err
+ }
+
+ cc := &ClientConn{
+ t: t,
+ tconn: c,
+ readerDone: make(chan struct{}),
+ nextStreamID: 1,
+ maxFrameSize: 16 << 10, // spec default
+ initialWindowSize: 65535, // spec default
+ maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough.
+ streams: make(map[uint32]*clientStream),
+ }
+ cc.cond = sync.NewCond(&cc.mu)
+ cc.flow.add(int32(initialWindowSize))
+
+ // TODO: adjust this writer size to account for frame size +
+ // MTU + crypto/tls record padding.
+ cc.bw = bufio.NewWriter(stickyErrWriter{c, &cc.werr})
+ cc.br = bufio.NewReader(c)
+ cc.fr = NewFramer(cc.bw, cc.br)
+ cc.fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
+ cc.fr.MaxHeaderListSize = t.maxHeaderListSize()
+
+ // TODO: SetMaxDynamicTableSize, SetMaxDynamicTableSizeLimit on
+ // henc in response to SETTINGS frames?
+ cc.henc = hpack.NewEncoder(&cc.hbuf)
+
+ if cs, ok := c.(connectionStater); ok {
+ state := cs.ConnectionState()
+ cc.tlsState = &state
+ }
+
+ initialSettings := []Setting{
+ {ID: SettingEnablePush, Val: 0},
+ {ID: SettingInitialWindowSize, Val: transportDefaultStreamFlow},
+ }
+ if max := t.maxHeaderListSize(); max != 0 {
+ initialSettings = append(initialSettings, Setting{ID: SettingMaxHeaderListSize, Val: max})
+ }
+ cc.fr.WriteSettings(initialSettings...)
+ cc.fr.WriteWindowUpdate(0, transportDefaultConnFlow)
+ cc.inflow.add(transportDefaultConnFlow + initialWindowSize)
+ cc.bw.Flush()
+ if cc.werr != nil {
+ return nil, cc.werr
+ }
+
+ // Read the obligatory SETTINGS frame
+ f, err := cc.fr.ReadFrame()
+ if err != nil {
+ return nil, err
+ }
+ sf, ok := f.(*SettingsFrame)
+ if !ok {
+ return nil, fmt.Errorf("expected settings frame, got: %T", f)
+ }
+ cc.fr.WriteSettingsAck()
+ cc.bw.Flush()
+
+ sf.ForeachSetting(func(s Setting) error {
+ switch s.ID {
+ case SettingMaxFrameSize:
+ cc.maxFrameSize = s.Val
+ case SettingMaxConcurrentStreams:
+ cc.maxConcurrentStreams = s.Val
+ case SettingInitialWindowSize:
+ cc.initialWindowSize = s.Val
+ default:
+ // TODO(bradfitz): handle more; at least SETTINGS_HEADER_TABLE_SIZE?
+ t.vlogf("Unhandled Setting: %v", s)
+ }
+ return nil
+ })
+
+ go cc.readLoop()
+ return cc, nil
+}
+
+func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cc.goAway = f
+}
+
+func (cc *ClientConn) CanTakeNewRequest() bool {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return cc.canTakeNewRequestLocked()
+}
+
+func (cc *ClientConn) canTakeNewRequestLocked() bool {
+ return cc.goAway == nil && !cc.closed &&
+ int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) &&
+ cc.nextStreamID < 2147483647
+}
+
+func (cc *ClientConn) closeIfIdle() {
+ cc.mu.Lock()
+ if len(cc.streams) > 0 {
+ cc.mu.Unlock()
+ return
+ }
+ cc.closed = true
+ // TODO: do clients send GOAWAY too? maybe? Just Close:
+ cc.mu.Unlock()
+
+ cc.tconn.Close()
+}
+
+const maxAllocFrameSize = 512 << 10
+
+// frameBuffer returns a scratch buffer suitable for writing DATA frames.
+// They're capped at the min of the peer's max frame size or 512KB
+// (kinda arbitrarily), but definitely capped so we don't allocate 4GB
+// buffers.
+func (cc *ClientConn) frameScratchBuffer() []byte {
+ cc.mu.Lock()
+ size := cc.maxFrameSize
+ if size > maxAllocFrameSize {
+ size = maxAllocFrameSize
+ }
+ for i, buf := range cc.freeBuf {
+ if len(buf) >= int(size) {
+ cc.freeBuf[i] = nil
+ cc.mu.Unlock()
+ return buf[:size]
+ }
+ }
+ cc.mu.Unlock()
+ return make([]byte, size)
+}
+
+func (cc *ClientConn) putFrameScratchBuffer(buf []byte) {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ const maxBufs = 4 // arbitrary; 4 concurrent requests per conn? investigate.
+ if len(cc.freeBuf) < maxBufs {
+ cc.freeBuf = append(cc.freeBuf, buf)
+ return
+ }
+ for i, old := range cc.freeBuf {
+ if old == nil {
+ cc.freeBuf[i] = buf
+ return
+ }
+ }
+ // forget about it.
+}
+
+// errRequestCanceled is a copy of net/http's errRequestCanceled because it's not
+// exported. At least they'll be DeepEqual for h1-vs-h2 comparison tests.
+var errRequestCanceled = errors.New("net/http: request canceled")
+
+func commaSeparatedTrailers(req *http.Request) (string, error) {
+ keys := make([]string, 0, len(req.Trailer))
+ for k := range req.Trailer {
+ k = http.CanonicalHeaderKey(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return "", &badStringError{"invalid Trailer key", k}
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ // TODO: could do better allocation-wise here, but trailers are rare,
+ // so being lazy for now.
+ return strings.Join(keys, ","), nil
+ }
+ return "", nil
+}
+
+func (cc *ClientConn) responseHeaderTimeout() time.Duration {
+ if cc.t.t1 != nil {
+ return cc.t.t1.ResponseHeaderTimeout
+ }
+ // No way to do this (yet?) with just an http2.Transport. Probably
+ // no need. Request.Cancel is the new way. We only need to support
+ // this for compatibility with the old http.Transport fields when
+ // we're doing transparent http2.
+ return 0
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers,
+// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
+// Certain headers are special-cased as okay but not transmitted later.
+func checkConnHeaders(req *http.Request) error {
+ if v := req.Header.Get("Upgrade"); v != "" {
+ return errors.New("http2: invalid Upgrade request header")
+ }
+ if v := req.Header.Get("Transfer-Encoding"); (v != "" && v != "chunked") || len(req.Header["Transfer-Encoding"]) > 1 {
+ return errors.New("http2: invalid Transfer-Encoding request header")
+ }
+ if v := req.Header.Get("Connection"); (v != "" && v != "close" && v != "keep-alive") || len(req.Header["Connection"]) > 1 {
+ return errors.New("http2: invalid Connection request header")
+ }
+ return nil
+}
+
+func bodyAndLength(req *http.Request) (body io.Reader, contentLen int64) {
+ body = req.Body
+ if body == nil {
+ return nil, 0
+ }
+ if req.ContentLength != 0 {
+ return req.Body, req.ContentLength
+ }
+
+ // We have a body but a zero content length. Test to see if
+ // it's actually zero or just unset.
+ var buf [1]byte
+ n, rerr := io.ReadFull(body, buf[:])
+ if rerr != nil && rerr != io.EOF {
+ return errorReader{rerr}, -1
+ }
+ if n == 1 {
+ // Oh, guess there is data in this Body Reader after all.
+ // The ContentLength field just wasn't set.
+ // Stitch the Body back together again, re-attaching our
+ // consumed byte.
+ return io.MultiReader(bytes.NewReader(buf[:]), body), -1
+ }
+ // Body is actually zero bytes.
+ return nil, 0
+}
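+
+// A sketch of the sniffing above (hypothetical request): a non-empty
+// Body whose ContentLength was left at zero has one byte read and
+// re-attached, and the length is reported as unknown:
+//
+//	req, _ := http.NewRequest("POST", "https://example.invalid/", strings.NewReader("x"))
+//	req.ContentLength = 0         // pretend the caller never set it
+//	body, n := bodyAndLength(req) // body replays the sniffed byte; n == -1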
+
+func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+ if err := checkConnHeaders(req); err != nil {
+ return nil, err
+ }
+
+ trailers, err := commaSeparatedTrailers(req)
+ if err != nil {
+ return nil, err
+ }
+ hasTrailers := trailers != ""
+
+ body, contentLen := bodyAndLength(req)
+ hasBody := body != nil
+
+ cc.mu.Lock()
+ cc.lastActive = time.Now()
+ if cc.closed || !cc.canTakeNewRequestLocked() {
+ cc.mu.Unlock()
+ return nil, errClientConnUnusable
+ }
+
+ // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+ var requestedGzip bool
+ if !cc.t.disableCompression() &&
+ req.Header.Get("Accept-Encoding") == "" &&
+ req.Header.Get("Range") == "" &&
+ req.Method != "HEAD" {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: http://www.gzip.org/zlib/zlib_faq.html#faq38
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // http://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
+ requestedGzip = true
+ }
+
+ // we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
+ // sent by writeRequestBody below, along with any Trailers,
+ // again in form HEADERS{1}, CONTINUATION{0,})
+ hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen)
+ if err != nil {
+ cc.mu.Unlock()
+ return nil, err
+ }
+
+ cs := cc.newStream()
+ cs.req = req
+ cs.trace = requestTrace(req)
+ cs.requestedGzip = requestedGzip
+ bodyWriter := cc.t.getBodyWriterState(cs, body)
+ cs.on100 = bodyWriter.on100
+
+ cc.wmu.Lock()
+ endStream := !hasBody && !hasTrailers
+ werr := cc.writeHeaders(cs.ID, endStream, hdrs)
+ cc.wmu.Unlock()
+ traceWroteHeaders(cs.trace)
+ cc.mu.Unlock()
+
+ if werr != nil {
+ if hasBody {
+ req.Body.Close() // per RoundTripper contract
+ bodyWriter.cancel()
+ }
+ cc.forgetStreamID(cs.ID)
+ // Don't bother sending a RST_STREAM (our write already failed;
+ // no need to keep writing)
+ traceWroteRequest(cs.trace, werr)
+ return nil, werr
+ }
+
+ var respHeaderTimer <-chan time.Time
+ if hasBody {
+ bodyWriter.scheduleBodyWrite()
+ } else {
+ traceWroteRequest(cs.trace, nil)
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C
+ }
+ }
+
+ readLoopResCh := cs.resc
+ bodyWritten := false
+ ctx := reqContext(req)
+
+ for {
+ select {
+ case re := <-readLoopResCh:
+ res := re.res
+ if re.err != nil || res.StatusCode > 299 {
+ // On error or status code 3xx, 4xx, 5xx, etc abort any
+ // ongoing write, assuming that the server doesn't care
+ // about our request body. If the server replied with 1xx or
+ // 2xx, however, then assume the server DOES potentially
+ // want our body (e.g. full-duplex streaming:
+ // golang.org/issue/13444). If it turns out the server
+ // doesn't, they'll RST_STREAM us soon enough. This is a
+ // heuristic to avoid adding knobs to Transport. Hopefully
+ // we can keep it.
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWrite)
+ }
+ if re.err != nil {
+ cc.forgetStreamID(cs.ID)
+ return nil, re.err
+ }
+ res.Request = req
+ res.TLS = cc.tlsState
+ return res, nil
+ case <-respHeaderTimer:
+ cc.forgetStreamID(cs.ID)
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+ }
+ return nil, errTimeout
+ case <-ctx.Done():
+ cc.forgetStreamID(cs.ID)
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+ }
+ return nil, ctx.Err()
+ case <-req.Cancel:
+ cc.forgetStreamID(cs.ID)
+ if !hasBody || bodyWritten {
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ } else {
+ bodyWriter.cancel()
+ cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel)
+ }
+ return nil, errRequestCanceled
+ case <-cs.peerReset:
+ // processResetStream already removed the
+ // stream from the streams map; no need for
+ // forgetStreamID.
+ return nil, cs.resetErr
+ case err := <-bodyWriter.resc:
+ if err != nil {
+ return nil, err
+ }
+ bodyWritten = true
+ if d := cc.responseHeaderTimeout(); d != 0 {
+ timer := time.NewTimer(d)
+ defer timer.Stop()
+ respHeaderTimer = timer.C
+ }
+ }
+ }
+}
+
+// requires cc.wmu be held
+func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error {
+ first := true // first frame written (HEADERS is first, then CONTINUATION)
+ frameSize := int(cc.maxFrameSize)
+ for len(hdrs) > 0 && cc.werr == nil {
+ chunk := hdrs
+ if len(chunk) > frameSize {
+ chunk = chunk[:frameSize]
+ }
+ hdrs = hdrs[len(chunk):]
+ endHeaders := len(hdrs) == 0
+ if first {
+ cc.fr.WriteHeaders(HeadersFrameParam{
+ StreamID: streamID,
+ BlockFragment: chunk,
+ EndStream: endStream,
+ EndHeaders: endHeaders,
+ })
+ first = false
+ } else {
+ cc.fr.WriteContinuation(streamID, endHeaders, chunk)
+ }
+ }
+ // TODO(bradfitz): this Flush could potentially block (as
+ // could the WriteHeaders call(s) above), which means they
+ // wouldn't respond to Request.Cancel being readable. That's
+ // rare, but this should probably be in a goroutine.
+ cc.bw.Flush()
+ return cc.werr
+}
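+
+// E.g. (hypothetical sizes): with cc.maxFrameSize == 16384 and a
+// 40000-byte header block, this writes a HEADERS frame of 16384 bytes
+// followed by CONTINUATION frames of 16384 and 7232 bytes, setting
+// END_HEADERS only on the last.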
+
+// internal error values; they don't escape to callers
+var (
+ // abort request body write; don't send cancel
+ errStopReqBodyWrite = errors.New("http2: aborting request body write")
+
+ // abort request body write, but send a stream reset (CANCEL).
+ errStopReqBodyWriteAndCancel = errors.New("http2: canceling request")
+)
+
+func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (err error) {
+ cc := cs.cc
+ sentEnd := false // whether we sent the final DATA frame w/ END_STREAM
+ buf := cc.frameScratchBuffer()
+ defer cc.putFrameScratchBuffer(buf)
+
+ defer func() {
+ traceWroteRequest(cs.trace, err)
+ // TODO: write h12Compare test showing whether
+ // Request.Body is closed by the Transport,
+ // and in multiple cases: server replies <=299 and >299
+ // while still writing request body
+ cerr := bodyCloser.Close()
+ if err == nil {
+ err = cerr
+ }
+ }()
+
+ req := cs.req
+ hasTrailers := req.Trailer != nil
+
+ var sawEOF bool
+ for !sawEOF {
+ n, err := body.Read(buf)
+ if err == io.EOF {
+ sawEOF = true
+ err = nil
+ } else if err != nil {
+ return err
+ }
+
+ remain := buf[:n]
+ for len(remain) > 0 && err == nil {
+ var allowed int32
+ allowed, err = cs.awaitFlowControl(len(remain))
+ switch {
+ case err == errStopReqBodyWrite:
+ return err
+ case err == errStopReqBodyWriteAndCancel:
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ return err
+ case err != nil:
+ return err
+ }
+ cc.wmu.Lock()
+ data := remain[:allowed]
+ remain = remain[allowed:]
+ sentEnd = sawEOF && len(remain) == 0 && !hasTrailers
+ err = cc.fr.WriteData(cs.ID, sentEnd, data)
+ if err == nil {
+ // TODO(bradfitz): this flush is for latency, not bandwidth.
+ // Most requests won't need this. Make this opt-in or opt-out?
+ // Use some heuristic on the body type? Nagel-like timers?
+ // Based on 'n'? Only last chunk of this for loop, unless flow control
+ // tokens are low? For now, always:
+ err = cc.bw.Flush()
+ }
+ cc.wmu.Unlock()
+ }
+ if err != nil {
+ return err
+ }
+ }
+
+ cc.wmu.Lock()
+ if !sentEnd {
+ var trls []byte
+ if hasTrailers {
+ cc.mu.Lock()
+ trls = cc.encodeTrailers(req)
+ cc.mu.Unlock()
+ }
+
+ // Avoid forgetting to send an END_STREAM if the encoded
+ // trailers are 0 bytes. Both branches below produce an END_STREAM.
+ if len(trls) > 0 {
+ err = cc.writeHeaders(cs.ID, true, trls)
+ } else {
+ err = cc.fr.WriteData(cs.ID, true, nil)
+ }
+ }
+ if ferr := cc.bw.Flush(); ferr != nil && err == nil {
+ err = ferr
+ }
+ cc.wmu.Unlock()
+
+ return err
+}
+
+// awaitFlowControl waits for [1, min(maxBytes, cc.cs.maxFrameSize)] flow
+// control tokens from the server.
+// It returns either the non-zero number of tokens taken or an error
+// if the stream is dead.
+func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error) {
+ cc := cs.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ for {
+ if cc.closed {
+ return 0, errClientConnClosed
+ }
+ if cs.stopReqBody != nil {
+ return 0, cs.stopReqBody
+ }
+ if err := cs.checkResetOrDone(); err != nil {
+ return 0, err
+ }
+ if a := cs.flow.available(); a > 0 {
+ take := a
+ if int(take) > maxBytes {
+ take = int32(maxBytes) // can't truncate int; take is int32
+ }
+ if take > int32(cc.maxFrameSize) {
+ take = int32(cc.maxFrameSize)
+ }
+ cs.flow.take(take)
+ return take, nil
+ }
+ cc.cond.Wait()
+ }
+}
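+
+// E.g. (hypothetical): with 100 KB of stream-level quota available,
+// maxBytes == 65536, and cc.maxFrameSize == 16384, awaitFlowControl
+// takes and returns 16384: the available quota is clamped first to
+// maxBytes and then to the peer's max frame size.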
+
+type badStringError struct {
+ what string
+ str string
+}
+
+func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
+
+// requires cc.mu be held.
+func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
+ cc.hbuf.Reset()
+
+ host := req.Host
+ if host == "" {
+ host = req.URL.Host
+ }
+
+ // Check for any invalid headers and return an error before we
+ // potentially pollute our hpack state. (We want to be able to
+ // continue to reuse the hpack encoder for future requests)
+ for k, vv := range req.Header {
+ if !httplex.ValidHeaderFieldName(k) {
+ return nil, fmt.Errorf("invalid HTTP header name %q", k)
+ }
+ for _, v := range vv {
+ if !httplex.ValidHeaderFieldValue(v) {
+ return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
+ }
+ }
+ }
+
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // The :path pseudo-header field includes the path and query parts of the
+ // target URI (the path-absolute production and optionally a '?' character
+ // followed by the query production (see Sections 3.3 and 3.4 of
+ // [RFC3986]).
+ cc.writeHeader(":authority", host)
+ cc.writeHeader(":method", req.Method)
+ if req.Method != "CONNECT" {
+ cc.writeHeader(":path", req.URL.RequestURI())
+ cc.writeHeader(":scheme", "https")
+ }
+ if trailers != "" {
+ cc.writeHeader("trailer", trailers)
+ }
+
+ var didUA bool
+ for k, vv := range req.Header {
+ lowKey := strings.ToLower(k)
+ switch lowKey {
+ case "host", "content-length":
+ // Host is :authority, already sent.
+ // Content-Length is automatic, set below.
+ continue
+ case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive":
+ // Per 8.1.2.2 Connection-Specific Header
+ // Fields, don't send connection-specific
+ // fields. We have already checked if any
+ // are error-worthy so just ignore the rest.
+ continue
+ case "user-agent":
+ // Match Go's http1 behavior: at most one
+ // User-Agent. If set to nil or empty string,
+ // then omit it. Otherwise if not mentioned,
+ // include the default (below).
+ didUA = true
+ if len(vv) < 1 {
+ continue
+ }
+ vv = vv[:1]
+ if vv[0] == "" {
+ continue
+ }
+ }
+ for _, v := range vv {
+ cc.writeHeader(lowKey, v)
+ }
+ }
+ if shouldSendReqContentLength(req.Method, contentLength) {
+ cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10))
+ }
+ if addGzipHeader {
+ cc.writeHeader("accept-encoding", "gzip")
+ }
+ if !didUA {
+ cc.writeHeader("user-agent", defaultUserAgent)
+ }
+ return cc.hbuf.Bytes(), nil
+}
+
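For illustration, a self-contained sketch of the rewriting encodeHeaders performs on regular headers: lowercase every name and drop the connection-specific fields HTTP/2 forbids. The skip set mirrors the switch above; this is an illustrative example, not the package's own code.

package main

import (
    "fmt"
    "net/http"
    "strings"
)

// skip lists the fields dropped for HTTP/2, mirroring the switch above.
var skip = map[string]bool{
    "host": true, "content-length": true, "connection": true,
    "proxy-connection": true, "transfer-encoding": true,
    "upgrade": true, "keep-alive": true,
}

func main() {
    h := http.Header{
        "Accept":     {"text/html"},
        "Connection": {"keep-alive"}, // connection-specific: dropped
    }
    for k, vv := range h {
        lk := strings.ToLower(k) // HTTP/2 header names are lowercase
        if skip[lk] {
            continue
        }
        for _, v := range vv {
            fmt.Printf("%s: %s\n", lk, v)
        }
    }
}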
+// shouldSendReqContentLength reports whether the http2.Transport should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
+func shouldSendReqContentLength(method string, contentLength int64) bool {
+ if contentLength > 0 {
+ return true
+ }
+ if contentLength < 0 {
+ return false
+ }
+ // For zero bodies, whether we send a content-length depends on the method.
+ // It also kinda doesn't matter for http2 either way, with END_STREAM.
+ switch method {
+ case "POST", "PUT", "PATCH":
+ return true
+ default:
+ return false
+ }
+}
+
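A runnable sketch of the rule above; shouldSend restates shouldSendReqContentLength so the truth table can be exercised outside the package.

package main

import "fmt"

// shouldSend restates the rule: positive lengths always send, unknown
// (-1) never sends, and zero sends only for methods that take bodies.
func shouldSend(method string, contentLength int64) bool {
    if contentLength > 0 {
        return true
    }
    if contentLength < 0 {
        return false
    }
    switch method {
    case "POST", "PUT", "PATCH":
        return true
    default:
        return false
    }
}

func main() {
    fmt.Println(shouldSend("GET", -1)) // false: unknown length
    fmt.Println(shouldSend("POST", 0)) // true: explicit empty body
    fmt.Println(shouldSend("GET", 0))  // false: no body expected
}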
+// requires cc.mu be held.
+func (cc *ClientConn) encodeTrailers(req *http.Request) []byte {
+ cc.hbuf.Reset()
+ for k, vv := range req.Trailer {
+ // Transfer-Encoding, etc. have already been filtered at the
+ // start of RoundTrip.
+ lowKey := strings.ToLower(k)
+ for _, v := range vv {
+ cc.writeHeader(lowKey, v)
+ }
+ }
+ return cc.hbuf.Bytes()
+}
+
+func (cc *ClientConn) writeHeader(name, value string) {
+ if VerboseLogs {
+ log.Printf("http2: Transport encoding header %q = %q", name, value)
+ }
+ cc.henc.WriteField(hpack.HeaderField{Name: name, Value: value})
+}
+
+type resAndError struct {
+ res *http.Response
+ err error
+}
+
+// requires cc.mu be held.
+func (cc *ClientConn) newStream() *clientStream {
+ cs := &clientStream{
+ cc: cc,
+ ID: cc.nextStreamID,
+ resc: make(chan resAndError, 1),
+ peerReset: make(chan struct{}),
+ done: make(chan struct{}),
+ }
+ cs.flow.add(int32(cc.initialWindowSize))
+ cs.flow.setConnFlow(&cc.flow)
+ cs.inflow.add(transportDefaultStreamFlow)
+ cs.inflow.setConnFlow(&cc.inflow)
+ cc.nextStreamID += 2
+ cc.streams[cs.ID] = cs
+ return cs
+}
+
+func (cc *ClientConn) forgetStreamID(id uint32) {
+ cc.streamByID(id, true)
+}
+
+func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ cs := cc.streams[id]
+ if andRemove && cs != nil && !cc.closed {
+ cc.lastActive = time.Now()
+ delete(cc.streams, id)
+ close(cs.done)
+ cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+ }
+ return cs
+}
+
+// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
+type clientConnReadLoop struct {
+ cc *ClientConn
+ activeRes map[uint32]*clientStream // keyed by streamID
+ closeWhenIdle bool
+}
+
+// readLoop runs in its own goroutine and reads and dispatches frames.
+func (cc *ClientConn) readLoop() {
+ rl := &clientConnReadLoop{
+ cc: cc,
+ activeRes: make(map[uint32]*clientStream),
+ }
+
+ defer rl.cleanup()
+ cc.readerErr = rl.run()
+ if ce, ok := cc.readerErr.(ConnectionError); ok {
+ cc.wmu.Lock()
+ cc.fr.WriteGoAway(0, ErrCode(ce), nil)
+ cc.wmu.Unlock()
+ }
+}
+
+func (rl *clientConnReadLoop) cleanup() {
+ cc := rl.cc
+ defer cc.tconn.Close()
+ defer cc.t.connPool().MarkDead(cc)
+ defer close(cc.readerDone)
+
+ // Close any response bodies if the server closes prematurely.
+ // TODO: also do this if we've written the headers but not
+ // gotten a response yet.
+ err := cc.readerErr
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ cc.mu.Lock()
+ for _, cs := range rl.activeRes {
+ cs.bufPipe.CloseWithError(err)
+ }
+ for _, cs := range cc.streams {
+ select {
+ case cs.resc <- resAndError{err: err}:
+ default:
+ }
+ close(cs.done)
+ }
+ cc.closed = true
+ cc.cond.Broadcast()
+ cc.mu.Unlock()
+}
+
+func (rl *clientConnReadLoop) run() error {
+ cc := rl.cc
+ rl.closeWhenIdle = cc.t.disableKeepAlives()
+ gotReply := false // ever saw a reply
+ for {
+ f, err := cc.fr.ReadFrame()
+ if err != nil {
+ cc.vlogf("Transport readFrame error: (%T) %v", err, err)
+ }
+ if se, ok := err.(StreamError); ok {
+ if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil {
+ rl.endStreamError(cs, cc.fr.errDetail)
+ }
+ continue
+ } else if err != nil {
+ return err
+ }
+ if VerboseLogs {
+ cc.vlogf("http2: Transport received %s", summarizeFrame(f))
+ }
+ maybeIdle := false // whether frame might transition us to idle
+
+ switch f := f.(type) {
+ case *MetaHeadersFrame:
+ err = rl.processHeaders(f)
+ maybeIdle = true
+ gotReply = true
+ case *DataFrame:
+ err = rl.processData(f)
+ maybeIdle = true
+ case *GoAwayFrame:
+ err = rl.processGoAway(f)
+ maybeIdle = true
+ case *RSTStreamFrame:
+ err = rl.processResetStream(f)
+ maybeIdle = true
+ case *SettingsFrame:
+ err = rl.processSettings(f)
+ case *PushPromiseFrame:
+ err = rl.processPushPromise(f)
+ case *WindowUpdateFrame:
+ err = rl.processWindowUpdate(f)
+ case *PingFrame:
+ err = rl.processPing(f)
+ default:
+ cc.logf("Transport: unhandled response frame type %T", f)
+ }
+ if err != nil {
+ return err
+ }
+ if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 {
+ cc.closeIfIdle()
+ }
+ }
+}
+
+func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
+ cc := rl.cc
+ cs := cc.streamByID(f.StreamID, f.StreamEnded())
+ if cs == nil {
+ // We'd get here if we canceled a request while the
+ // server had its response still in flight. So if this
+ // was just something we canceled, ignore it.
+ return nil
+ }
+ if !cs.pastHeaders {
+ cs.pastHeaders = true
+ } else {
+ return rl.processTrailers(cs, f)
+ }
+ if cs.trace != nil {
+ // TODO(bradfitz): move first response byte earlier,
+ // when we first read the 9 byte header, not waiting
+ // until all the HEADERS+CONTINUATION frames have been
+ // merged. This works for now.
+ traceFirstResponseByte(cs.trace)
+ }
+
+ res, err := rl.handleResponse(cs, f)
+ if err != nil {
+ if _, ok := err.(ConnectionError); ok {
+ return err
+ }
+ // Any other error type is a stream error.
+ cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err)
+ cs.resc <- resAndError{err: err}
+ return nil // return nil from process* funcs to keep conn alive
+ }
+ if res == nil {
+ // (nil, nil) special case. See handleResponse docs.
+ return nil
+ }
+ if res.Body != noBody {
+ rl.activeRes[cs.ID] = cs
+ }
+ cs.resTrailer = &res.Trailer
+ cs.resc <- resAndError{res: res}
+ return nil
+}
+
+// handleResponse may return a nil error or a ConnectionError. Any other
+// error value is treated as a StreamError of type ErrCodeProtocol, with
+// the returned error as its detail.
+//
+// As a special case, handleResponse may return (nil, nil) to skip the
+// frame (currently only used for 100 expect continue). This special
+// case is going away after Issue 13851 is fixed.
+func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
+ if f.Truncated {
+ return nil, errResponseHeaderListSize
+ }
+
+ status := f.PseudoValue("status")
+ if status == "" {
+ return nil, errors.New("missing status pseudo header")
+ }
+ statusCode, err := strconv.Atoi(status)
+ if err != nil {
+ return nil, errors.New("malformed non-numeric status pseudo header")
+ }
+
+ if statusCode == 100 {
+ traceGot100Continue(cs.trace)
+ if cs.on100 != nil {
+ cs.on100() // forces any write delay timer to fire
+ }
+ cs.pastHeaders = false // do it all again
+ return nil, nil
+ }
+
+ header := make(http.Header)
+ res := &http.Response{
+ Proto: "HTTP/2.0",
+ ProtoMajor: 2,
+ Header: header,
+ StatusCode: statusCode,
+ Status: status + " " + http.StatusText(statusCode),
+ }
+ for _, hf := range f.RegularFields() {
+ key := http.CanonicalHeaderKey(hf.Name)
+ if key == "Trailer" {
+ t := res.Trailer
+ if t == nil {
+ t = make(http.Header)
+ res.Trailer = t
+ }
+ foreachHeaderElement(hf.Value, func(v string) {
+ t[http.CanonicalHeaderKey(v)] = nil
+ })
+ } else {
+ header[key] = append(header[key], hf.Value)
+ }
+ }
+
+ streamEnded := f.StreamEnded()
+ isHead := cs.req.Method == "HEAD"
+ if !streamEnded || isHead {
+ res.ContentLength = -1
+ if clens := res.Header["Content-Length"]; len(clens) == 1 {
+ if clen64, err := strconv.ParseInt(clens[0], 10, 64); err == nil {
+ res.ContentLength = clen64
+ } else {
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // more safe smuggling-wise to ignore.
+ }
+ } else if len(clens) > 1 {
+ // TODO: care? unlike http/1, it won't mess up our framing, so it's
+ // more safe smuggling-wise to ignore.
+ }
+ }
+
+ if streamEnded || isHead {
+ res.Body = noBody
+ return res, nil
+ }
+
+ buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage
+ cs.bufPipe = pipe{b: buf}
+ cs.bytesRemain = res.ContentLength
+ res.Body = transportResponseBody{cs}
+ go cs.awaitRequestCancel(cs.req)
+
+ if cs.requestedGzip && res.Header.Get("Content-Encoding") == "gzip" {
+ res.Header.Del("Content-Encoding")
+ res.Header.Del("Content-Length")
+ res.ContentLength = -1
+ res.Body = &gzipReader{body: res.Body}
+ setResponseUncompressed(res)
+ }
+ return res, nil
+}
+
+func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFrame) error {
+ if cs.pastTrailers {
+ // Too many HEADERS frames for this stream.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ cs.pastTrailers = true
+ if !f.StreamEnded() {
+ // We expect that any HEADERS frame carrying trailers
+ // also has END_STREAM set.
+ return ConnectionError(ErrCodeProtocol)
+ }
+ if len(f.PseudoFields()) > 0 {
+ // No pseudo header fields are defined for trailers.
+ // TODO: ConnectionError might be overly harsh? Check.
+ return ConnectionError(ErrCodeProtocol)
+ }
+
+ trailer := make(http.Header)
+ for _, hf := range f.RegularFields() {
+ key := http.CanonicalHeaderKey(hf.Name)
+ trailer[key] = append(trailer[key], hf.Value)
+ }
+ cs.trailer = trailer
+
+ rl.endStream(cs)
+ return nil
+}
+
+// transportResponseBody is the concrete type of Transport.RoundTrip's
+// Response.Body. It is an io.ReadCloser. On Read, it reads from cs.body.
+// On Close it sends RST_STREAM if EOF wasn't already seen.
+type transportResponseBody struct {
+ cs *clientStream
+}
+
+func (b transportResponseBody) Read(p []byte) (n int, err error) {
+ cs := b.cs
+ cc := cs.cc
+
+ if cs.readErr != nil {
+ return 0, cs.readErr
+ }
+ n, err = b.cs.bufPipe.Read(p)
+ if cs.bytesRemain != -1 {
+ if int64(n) > cs.bytesRemain {
+ n = int(cs.bytesRemain)
+ if err == nil {
+ err = errors.New("net/http: server replied with more than declared Content-Length; truncated")
+ cc.writeStreamReset(cs.ID, ErrCodeProtocol, err)
+ }
+ cs.readErr = err
+ return int(cs.bytesRemain), err
+ }
+ cs.bytesRemain -= int64(n)
+ if err == io.EOF && cs.bytesRemain > 0 {
+ err = io.ErrUnexpectedEOF
+ cs.readErr = err
+ return n, err
+ }
+ }
+ if n == 0 {
+ // No flow control tokens to send back.
+ return
+ }
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ var connAdd, streamAdd int32
+ // Check the conn-level first, before the stream-level.
+ if v := cc.inflow.available(); v < transportDefaultConnFlow/2 {
+ connAdd = transportDefaultConnFlow - v
+ cc.inflow.add(connAdd)
+ }
+ if err == nil { // No need to refresh if the stream is over or failed.
+ if v := cs.inflow.available(); v < transportDefaultStreamFlow-transportDefaultStreamMinRefresh {
+ streamAdd = transportDefaultStreamFlow - v
+ cs.inflow.add(streamAdd)
+ }
+ }
+ if connAdd != 0 || streamAdd != 0 {
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if connAdd != 0 {
+ cc.fr.WriteWindowUpdate(0, mustUint31(connAdd))
+ }
+ if streamAdd != 0 {
+ cc.fr.WriteWindowUpdate(cs.ID, mustUint31(streamAdd))
+ }
+ cc.bw.Flush()
+ }
+ return
+}
+
+var errClosedResponseBody = errors.New("http2: response body closed")
+
+func (b transportResponseBody) Close() error {
+ cs := b.cs
+ if cs.bufPipe.Err() != io.EOF {
+ // TODO: write test for this
+ cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil)
+ }
+ cs.bufPipe.BreakWithError(errClosedResponseBody)
+ return nil
+}
+
+func (rl *clientConnReadLoop) processData(f *DataFrame) error {
+ cc := rl.cc
+ cs := cc.streamByID(f.StreamID, f.StreamEnded())
+ if cs == nil {
+ cc.mu.Lock()
+ neverSent := cc.nextStreamID
+ cc.mu.Unlock()
+ if f.StreamID >= neverSent {
+ // We never asked for this.
+ cc.logf("http2: Transport received unsolicited DATA frame; closing connection")
+ return ConnectionError(ErrCodeProtocol)
+ }
+ // We probably did ask for this, but canceled. Just ignore it.
+ // TODO: be stricter here? only silently ignore things which
+ // we canceled, but not things which were closed normally
+ // by the peer? Tough without accumulating too much state.
+ return nil
+ }
+ if data := f.Data(); len(data) > 0 {
+ if cs.bufPipe.b == nil {
+ // Data frame after it's already closed?
+ cc.logf("http2: Transport received DATA frame for closed stream; closing connection")
+ return ConnectionError(ErrCodeProtocol)
+ }
+
+ // Check connection-level flow control.
+ cc.mu.Lock()
+ if cs.inflow.available() >= int32(len(data)) {
+ cs.inflow.take(int32(len(data)))
+ } else {
+ cc.mu.Unlock()
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ cc.mu.Unlock()
+
+ if _, err := cs.bufPipe.Write(data); err != nil {
+ rl.endStreamError(cs, err)
+ return err
+ }
+ }
+
+ if f.StreamEnded() {
+ rl.endStream(cs)
+ }
+ return nil
+}
+
+var errInvalidTrailers = errors.New("http2: invalid trailers")
+
+func (rl *clientConnReadLoop) endStream(cs *clientStream) {
+ // TODO: check that any declared content-length matches, like
+ // server.go's (*stream).endStream method.
+ rl.endStreamError(cs, nil)
+}
+
+func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
+ var code func()
+ if err == nil {
+ err = io.EOF
+ code = cs.copyTrailers
+ }
+ cs.bufPipe.closeWithErrorAndCode(err, code)
+ delete(rl.activeRes, cs.ID)
+ if cs.req.Close || cs.req.Header.Get("Connection") == "close" {
+ rl.closeWhenIdle = true
+ }
+}
+
+func (cs *clientStream) copyTrailers() {
+ for k, vv := range cs.trailer {
+ t := cs.resTrailer
+ if *t == nil {
+ *t = make(http.Header)
+ }
+ (*t)[k] = vv
+ }
+}
+
+func (rl *clientConnReadLoop) processGoAway(f *GoAwayFrame) error {
+ cc := rl.cc
+ cc.t.connPool().MarkDead(cc)
+ if f.ErrCode != 0 {
+ // TODO: deal with GOAWAY more. particularly the error code
+ cc.vlogf("transport got GOAWAY with error code = %v", f.ErrCode)
+ }
+ cc.setGoAway(f)
+ return nil
+}
+
+func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
+ cc := rl.cc
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+ return f.ForeachSetting(func(s Setting) error {
+ switch s.ID {
+ case SettingMaxFrameSize:
+ cc.maxFrameSize = s.Val
+ case SettingMaxConcurrentStreams:
+ cc.maxConcurrentStreams = s.Val
+ case SettingInitialWindowSize:
+ // TODO: error if this is too large.
+
+ // TODO: adjust flow control of still-open
+ // frames by the difference of the old initial
+ // window size and this one.
+ cc.initialWindowSize = s.Val
+ default:
+ // TODO(bradfitz): handle more settings? SETTINGS_HEADER_TABLE_SIZE probably.
+ cc.vlogf("Unhandled Setting: %v", s)
+ }
+ return nil
+ })
+}
+
+func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
+ cc := rl.cc
+ cs := cc.streamByID(f.StreamID, false)
+ if f.StreamID != 0 && cs == nil {
+ return nil
+ }
+
+ cc.mu.Lock()
+ defer cc.mu.Unlock()
+
+ fl := &cc.flow
+ if cs != nil {
+ fl = &cs.flow
+ }
+ if !fl.add(int32(f.Increment)) {
+ return ConnectionError(ErrCodeFlowControl)
+ }
+ cc.cond.Broadcast()
+ return nil
+}
+
+func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
+ cs := rl.cc.streamByID(f.StreamID, true)
+ if cs == nil {
+ // TODO: return error if server tries to RST_STREAM an idle stream
+ return nil
+ }
+ select {
+ case <-cs.peerReset:
+ // Already reset.
+ // This is the only goroutine
+ // which closes this, so there
+ // isn't a race.
+ default:
+ err := StreamError{cs.ID, f.ErrCode}
+ cs.resetErr = err
+ close(cs.peerReset)
+ cs.bufPipe.CloseWithError(err)
+ cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl
+ }
+ delete(rl.activeRes, cs.ID)
+ return nil
+}
+
+func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
+ if f.IsAck() {
+ // 6.7 PING: " An endpoint MUST NOT respond to PING frames
+ // containing this flag."
+ return nil
+ }
+ cc := rl.cc
+ cc.wmu.Lock()
+ defer cc.wmu.Unlock()
+ if err := cc.fr.WritePing(true, f.Data); err != nil {
+ return err
+ }
+ return cc.bw.Flush()
+}
+
+func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
+ // We told the peer we don't want them.
+ // Spec says:
+ // "PUSH_PROMISE MUST NOT be sent if the SETTINGS_ENABLE_PUSH
+ // setting of the peer endpoint is set to 0. An endpoint that
+ // has set this setting and has received acknowledgement MUST
+ // treat the receipt of a PUSH_PROMISE frame as a connection
+ // error (Section 5.4.1) of type PROTOCOL_ERROR."
+ return ConnectionError(ErrCodeProtocol)
+}
+
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+ // TODO: do something with err? send it as a debug frame to the peer?
+ // But that's only in GOAWAY. Invent a new frame type? Is there one already?
+ cc.wmu.Lock()
+ cc.fr.WriteRSTStream(streamID, code)
+ cc.bw.Flush()
+ cc.wmu.Unlock()
+}
+
+var (
+ errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
+ errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers")
+)
+
+func (cc *ClientConn) logf(format string, args ...interface{}) {
+ cc.t.logf(format, args...)
+}
+
+func (cc *ClientConn) vlogf(format string, args ...interface{}) {
+ cc.t.vlogf(format, args...)
+}
+
+func (t *Transport) vlogf(format string, args ...interface{}) {
+ if VerboseLogs {
+ t.logf(format, args...)
+ }
+}
+
+func (t *Transport) logf(format string, args ...interface{}) {
+ log.Printf(format, args...)
+}
+
+var noBody io.ReadCloser = ioutil.NopCloser(bytes.NewReader(nil))
+
+func strSliceContains(ss []string, s string) bool {
+ for _, v := range ss {
+ if v == s {
+ return true
+ }
+ }
+ return false
+}
+
+type erringRoundTripper struct{ err error }
+
+func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
+
+// gzipReader wraps a response body so it can lazily
+// call gzip.NewReader on the first call to Read
+type gzipReader struct {
+ body io.ReadCloser // underlying Response.Body
+ zr *gzip.Reader // lazily-initialized gzip reader
+ zerr error // sticky error
+}
+
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+ if gz.zerr != nil {
+ return 0, gz.zerr
+ }
+ if gz.zr == nil {
+ gz.zr, err = gzip.NewReader(gz.body)
+ if err != nil {
+ gz.zerr = err
+ return 0, err
+ }
+ }
+ return gz.zr.Read(p)
+}
+
+func (gz *gzipReader) Close() error {
+ return gz.body.Close()
+}
+
+type errorReader struct{ err error }
+
+func (r errorReader) Read(p []byte) (int, error) { return 0, r.err }
+
+// bodyWriterState encapsulates various state around the Transport's writing
+// of the request body, particularly regarding doing delayed writes of the body
+// when the request contains "Expect: 100-continue".
+type bodyWriterState struct {
+ cs *clientStream
+ timer *time.Timer // if non-nil, we're doing a delayed write
+ fnonce *sync.Once // to call fn with
+ fn func() // the code to run in the goroutine, writing the body
+ resc chan error // result of fn's execution
+ delay time.Duration // how long we should delay a delayed write for
+}
+
+func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s bodyWriterState) {
+ s.cs = cs
+ if body == nil {
+ return
+ }
+ resc := make(chan error, 1)
+ s.resc = resc
+ s.fn = func() {
+ resc <- cs.writeRequestBody(body, cs.req.Body)
+ }
+ s.delay = t.expectContinueTimeout()
+ if s.delay == 0 ||
+ !httplex.HeaderValuesContainsToken(
+ cs.req.Header["Expect"],
+ "100-continue") {
+ return
+ }
+ s.fnonce = new(sync.Once)
+
+ // Arm the timer with a very large duration, which we'll
+ // intentionally lower later. It has to be large now because
+ // we need a handle to it before writing the headers, but the
+ // s.delay value is defined to not start until after the
+ // request headers were written.
+ const hugeDuration = 365 * 24 * time.Hour
+ s.timer = time.AfterFunc(hugeDuration, func() {
+ s.fnonce.Do(s.fn)
+ })
+ return
+}
+
+func (s bodyWriterState) cancel() {
+ if s.timer != nil {
+ s.timer.Stop()
+ }
+}
+
+func (s bodyWriterState) on100() {
+ if s.timer == nil {
+ // If we didn't do a delayed write, ignore the server's
+ // bogus 100 continue response.
+ return
+ }
+ s.timer.Stop()
+ go func() { s.fnonce.Do(s.fn) }()
+}
+
+// scheduleBodyWrite starts writing the body, either immediately (in
+// the common case) or after the delay timeout. It should not be
+// called until after the headers have been written.
+func (s bodyWriterState) scheduleBodyWrite() {
+ if s.timer == nil {
+ // We're not doing a delayed write (see
+ // getBodyWriterState), so just start the writing
+ // goroutine immediately.
+ go s.fn()
+ return
+ }
+ traceWait100Continue(s.cs.trace)
+ if s.timer.Stop() {
+ s.timer.Reset(s.delay)
+ }
+}
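A standalone sketch, standard library only, of the delayed-write pattern bodyWriterState implements for "Expect: 100-continue": arm a timer with a huge duration, lower it once the headers are written, and let whichever of the timeout or an early 100 response fires first start the body write exactly once via sync.Once.

package main

import (
    "fmt"
    "sync"
    "time"
)

func main() {
    var once sync.Once
    write := func() { fmt.Println("writing request body") }

    // Arm with a huge duration first (as in getBodyWriterState), since we
    // need the timer handle before the headers are written.
    t := time.AfterFunc(365*24*time.Hour, func() { once.Do(write) })

    // Headers written: lower the timer (scheduleBodyWrite equivalent).
    t.Reset(50 * time.Millisecond)

    // Simulate a 100 Continue arriving first (on100 equivalent).
    t.Stop()
    go once.Do(write)

    time.Sleep(100 * time.Millisecond) // let the goroutine run
}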
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
new file mode 100644
index 0000000..27ef0dd
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -0,0 +1,264 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "bytes"
+ "fmt"
+ "log"
+ "net/http"
+ "time"
+
+ "golang.org/x/net/http2/hpack"
+ "golang.org/x/net/lex/httplex"
+)
+
+// writeFramer is implemented by any type that is used to write frames.
+type writeFramer interface {
+ writeFrame(writeContext) error
+}
+
+// writeContext is the interface needed by the various frame writer
+// types below. All the writeFrame methods below are scheduled via the
+// frame writing scheduler (see writeScheduler in writesched.go).
+//
+// This interface is implemented by *serverConn.
+//
+// TODO: decide whether to a) use this in the client code (which didn't
+// end up using this yet, because it has a simpler design, not
+// currently implementing priorities), or b) delete this and
+// make the server code a bit more concrete.
+type writeContext interface {
+ Framer() *Framer
+ Flush() error
+ CloseConn() error
+ // HeaderEncoder returns an HPACK encoder that writes to the
+ // returned buffer.
+ HeaderEncoder() (*hpack.Encoder, *bytes.Buffer)
+}
+
+// endsStream reports whether the given frame writer w will locally
+// close the stream.
+func endsStream(w writeFramer) bool {
+ switch v := w.(type) {
+ case *writeData:
+ return v.endStream
+ case *writeResHeaders:
+ return v.endStream
+ case nil:
+ // This can only happen if the caller reuses w after it's
+ // been intentionally nil'ed out to prevent use. Keep this
+ // here to catch future refactoring breaking it.
+ panic("endsStream called on nil writeFramer")
+ }
+ return false
+}
+
+type flushFrameWriter struct{}
+
+func (flushFrameWriter) writeFrame(ctx writeContext) error {
+ return ctx.Flush()
+}
+
+type writeSettings []Setting
+
+func (s writeSettings) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteSettings([]Setting(s)...)
+}
+
+type writeGoAway struct {
+ maxStreamID uint32
+ code ErrCode
+}
+
+func (p *writeGoAway) writeFrame(ctx writeContext) error {
+ err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil)
+ if p.code != 0 {
+ ctx.Flush() // ignore error: we're hanging up on them anyway
+ time.Sleep(50 * time.Millisecond)
+ ctx.CloseConn()
+ }
+ return err
+}
+
+type writeData struct {
+ streamID uint32
+ p []byte
+ endStream bool
+}
+
+func (w *writeData) String() string {
+ return fmt.Sprintf("writeData(stream=%d, p=%d, endStream=%v)", w.streamID, len(w.p), w.endStream)
+}
+
+func (w *writeData) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteData(w.streamID, w.endStream, w.p)
+}
+
+// handlerPanicRST is the message sent from handler goroutines when
+// the handler panics.
+type handlerPanicRST struct {
+ StreamID uint32
+}
+
+func (hp handlerPanicRST) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteRSTStream(hp.StreamID, ErrCodeInternal)
+}
+
+func (se StreamError) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteRSTStream(se.StreamID, se.Code)
+}
+
+type writePingAck struct{ pf *PingFrame }
+
+func (w writePingAck) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WritePing(true, w.pf.Data)
+}
+
+type writeSettingsAck struct{}
+
+func (writeSettingsAck) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteSettingsAck()
+}
+
+// writeResHeaders is a request to write a HEADERS and 0+ CONTINUATION frames
+// for HTTP response headers or trailers from a server handler.
+type writeResHeaders struct {
+ streamID uint32
+ httpResCode int // 0 means no ":status" line
+ h http.Header // may be nil
+ trailers []string // if non-nil, which keys of h to write. nil means all.
+ endStream bool
+
+ date string
+ contentType string
+ contentLength string
+}
+
+func encKV(enc *hpack.Encoder, k, v string) {
+ if VerboseLogs {
+ log.Printf("http2: server encoding header %q = %q", k, v)
+ }
+ enc.WriteField(hpack.HeaderField{Name: k, Value: v})
+}
+
+func (w *writeResHeaders) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+
+ if w.httpResCode != 0 {
+ encKV(enc, ":status", httpCodeString(w.httpResCode))
+ }
+
+ encodeHeaders(enc, w.h, w.trailers)
+
+ if w.contentType != "" {
+ encKV(enc, "content-type", w.contentType)
+ }
+ if w.contentLength != "" {
+ encKV(enc, "content-length", w.contentLength)
+ }
+ if w.date != "" {
+ encKV(enc, "date", w.date)
+ }
+
+ headerBlock := buf.Bytes()
+ if len(headerBlock) == 0 && w.trailers == nil {
+ panic("unexpected empty hpack")
+ }
+
+ // For now we're lazy and just pick the minimum MAX_FRAME_SIZE
+ // that all peers must support (16KB). Later we could care
+ // more and send larger frames if the peer advertised it, but
+ // there's little point. Most headers are small anyway (so we
+ // generally won't have CONTINUATION frames), and extra frames
+// only waste 9 bytes.
+ const maxFrameSize = 16384
+
+ first := true
+ for len(headerBlock) > 0 {
+ frag := headerBlock
+ if len(frag) > maxFrameSize {
+ frag = frag[:maxFrameSize]
+ }
+ headerBlock = headerBlock[len(frag):]
+ endHeaders := len(headerBlock) == 0
+ var err error
+ if first {
+ first = false
+ err = ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: frag,
+ EndStream: w.endStream,
+ EndHeaders: endHeaders,
+ })
+ } else {
+ err = ctx.Framer().WriteContinuation(w.streamID, endHeaders, frag)
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+type write100ContinueHeadersFrame struct {
+ streamID uint32
+}
+
+func (w write100ContinueHeadersFrame) writeFrame(ctx writeContext) error {
+ enc, buf := ctx.HeaderEncoder()
+ buf.Reset()
+ encKV(enc, ":status", "100")
+ return ctx.Framer().WriteHeaders(HeadersFrameParam{
+ StreamID: w.streamID,
+ BlockFragment: buf.Bytes(),
+ EndStream: false,
+ EndHeaders: true,
+ })
+}
+
+type writeWindowUpdate struct {
+ streamID uint32 // or 0 for conn-level
+ n uint32
+}
+
+func (wu writeWindowUpdate) writeFrame(ctx writeContext) error {
+ return ctx.Framer().WriteWindowUpdate(wu.streamID, wu.n)
+}
+
+func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
+ if keys == nil {
+ sorter := sorterPool.Get().(*sorter)
+ // Using defer here, since the returned keys from the
+ // sorter.Keys method is only valid until the sorter
+ // is returned:
+ defer sorterPool.Put(sorter)
+ keys = sorter.Keys(h)
+ }
+ for _, k := range keys {
+ vv := h[k]
+ k = lowerHeader(k)
+ if !validWireHeaderFieldName(k) {
+ // Skip it as backup paranoia. Per
+ // golang.org/issue/14048, these should
+ // already be rejected at a higher level.
+ continue
+ }
+ isTE := k == "transfer-encoding"
+ for _, v := range vv {
+ if !httplex.ValidHeaderFieldValue(v) {
+ // TODO: return an error? golang.org/issue/14048
+ // For now just omit it.
+ continue
+ }
+ // TODO: more of "8.1.2.2 Connection-Specific Header Fields"
+ if isTE && v != "trailers" {
+ continue
+ }
+ encKV(enc, k, v)
+ }
+ }
+}
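A minimal sketch of the writeFramer pattern this file is built on: each frame kind is a small value with a single writeFrame method, and the write scheduler invokes it without knowing the concrete type. The types below are illustrative stand-ins, not the package's own.

package main

import "fmt"

type writeContext interface{ Flush() error }

// stdoutCtx is a toy writeContext that just prints.
type stdoutCtx struct{}

func (stdoutCtx) Flush() error { fmt.Println("flush"); return nil }

type writeFramer interface {
    writeFrame(writeContext) error
}

type writePing struct{}

func (writePing) writeFrame(ctx writeContext) error {
    fmt.Println("PING")
    return ctx.Flush()
}

type writeSettingsAck struct{}

func (writeSettingsAck) writeFrame(ctx writeContext) error {
    fmt.Println("SETTINGS ack")
    return ctx.Flush()
}

func main() {
    for _, f := range []writeFramer{writePing{}, writeSettingsAck{}} {
        if err := f.writeFrame(stdoutCtx{}); err != nil {
            fmt.Println("error:", err)
        }
    }
}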
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
new file mode 100644
index 0000000..c24316c
--- /dev/null
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -0,0 +1,283 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import "fmt"
+
+// frameWriteMsg is a request to write a frame.
+type frameWriteMsg struct {
+ // write is the interface value that does the writing, once the
+ // writeScheduler (below) has decided to select this frame
+ // to write. The write functions are all defined in write.go.
+ write writeFramer
+
+ stream *stream // used for prioritization. nil for non-stream frames.
+
+ // done, if non-nil, must be a buffered channel with space for
+ // 1 message and is sent the return value from write (or an
+ // earlier error) when the frame has been written.
+ done chan error
+}
+
+// for debugging only:
+func (wm frameWriteMsg) String() string {
+ var streamID uint32
+ if wm.stream != nil {
+ streamID = wm.stream.id
+ }
+ var des string
+ if s, ok := wm.write.(fmt.Stringer); ok {
+ des = s.String()
+ } else {
+ des = fmt.Sprintf("%T", wm.write)
+ }
+ return fmt.Sprintf("[frameWriteMsg stream=%d, ch=%v, type: %v]", streamID, wm.done != nil, des)
+}
+
+// writeScheduler tracks pending frames to write, priorities, and decides
+// the next one to use. It is not thread-safe.
+type writeScheduler struct {
+ // zero are frames not associated with a specific stream.
+ // They're sent before any stream-specific frames.
+ zero writeQueue
+
+ // maxFrameSize is the maximum size of a DATA frame
+ // we'll write. Must be non-zero and between 16K-16M.
+ maxFrameSize uint32
+
+ // sq contains the stream-specific queues, keyed by stream ID.
+ // when a stream is idle, it's deleted from the map.
+ sq map[uint32]*writeQueue
+
+ // canSend is a slice of memory that's reused between frame
+ // scheduling decisions to hold the list of writeQueues (from sq)
+ // which have enough flow control data to send. After canSend is
+ // built, the best is selected.
+ canSend []*writeQueue
+
+ // pool of empty queues for reuse.
+ queuePool []*writeQueue
+}
+
+func (ws *writeScheduler) putEmptyQueue(q *writeQueue) {
+ if len(q.s) != 0 {
+ panic("queue must be empty")
+ }
+ ws.queuePool = append(ws.queuePool, q)
+}
+
+func (ws *writeScheduler) getEmptyQueue() *writeQueue {
+ ln := len(ws.queuePool)
+ if ln == 0 {
+ return new(writeQueue)
+ }
+ q := ws.queuePool[ln-1]
+ ws.queuePool = ws.queuePool[:ln-1]
+ return q
+}
+
+func (ws *writeScheduler) empty() bool { return ws.zero.empty() && len(ws.sq) == 0 }
+
+func (ws *writeScheduler) add(wm frameWriteMsg) {
+ st := wm.stream
+ if st == nil {
+ ws.zero.push(wm)
+ } else {
+ ws.streamQueue(st.id).push(wm)
+ }
+}
+
+func (ws *writeScheduler) streamQueue(streamID uint32) *writeQueue {
+ if q, ok := ws.sq[streamID]; ok {
+ return q
+ }
+ if ws.sq == nil {
+ ws.sq = make(map[uint32]*writeQueue)
+ }
+ q := ws.getEmptyQueue()
+ ws.sq[streamID] = q
+ return q
+}
+
+// take returns the most important frame to write and removes it from the scheduler.
+// It is illegal to call this if the scheduler is empty or if there are no connection-level
+// flow control bytes available.
+func (ws *writeScheduler) take() (wm frameWriteMsg, ok bool) {
+ if ws.maxFrameSize == 0 {
+ panic("internal error: ws.maxFrameSize not initialized or invalid")
+ }
+
+ // If there are any frames not associated with streams, prefer those first.
+ // These are usually SETTINGS, etc.
+ if !ws.zero.empty() {
+ return ws.zero.shift(), true
+ }
+ if len(ws.sq) == 0 {
+ return
+ }
+
+ // Next, prioritize frames on streams that aren't DATA frames (no cost).
+ for id, q := range ws.sq {
+ if q.firstIsNoCost() {
+ return ws.takeFrom(id, q)
+ }
+ }
+
+ // Now, all that remains are DATA frames with non-zero bytes to
+ // send. So pick the best one.
+ if len(ws.canSend) != 0 {
+ panic("should be empty")
+ }
+ for _, q := range ws.sq {
+ if n := ws.streamWritableBytes(q); n > 0 {
+ ws.canSend = append(ws.canSend, q)
+ }
+ }
+ if len(ws.canSend) == 0 {
+ return
+ }
+ defer ws.zeroCanSend()
+
+ // TODO: find the best queue
+ q := ws.canSend[0]
+
+ return ws.takeFrom(q.streamID(), q)
+}
+
+// zeroCanSend is deferred from take.
+func (ws *writeScheduler) zeroCanSend() {
+ for i := range ws.canSend {
+ ws.canSend[i] = nil
+ }
+ ws.canSend = ws.canSend[:0]
+}
+
+// streamWritableBytes returns the number of DATA bytes we could write
+// from the given queue's stream, if this stream/queue were
+// selected. It is an error to call this if q's head isn't a
+// *writeData.
+func (ws *writeScheduler) streamWritableBytes(q *writeQueue) int32 {
+ wm := q.head()
+ ret := wm.stream.flow.available() // max we can write
+ if ret == 0 {
+ return 0
+ }
+ if int32(ws.maxFrameSize) < ret {
+ ret = int32(ws.maxFrameSize)
+ }
+ if ret == 0 {
+ panic("internal error: ws.maxFrameSize not initialized or invalid")
+ }
+ wd := wm.write.(*writeData)
+ if len(wd.p) < int(ret) {
+ ret = int32(len(wd.p))
+ }
+ return ret
+}
+
+func (ws *writeScheduler) takeFrom(id uint32, q *writeQueue) (wm frameWriteMsg, ok bool) {
+ wm = q.head()
+ // If the first item in this queue costs flow control tokens
+ // and we don't have enough, write as much as we can.
+ if wd, ok := wm.write.(*writeData); ok && len(wd.p) > 0 {
+ allowed := wm.stream.flow.available() // max we can write
+ if allowed == 0 {
+ // No quota available. Caller can try the next stream.
+ return frameWriteMsg{}, false
+ }
+ if int32(ws.maxFrameSize) < allowed {
+ allowed = int32(ws.maxFrameSize)
+ }
+ // TODO: further restrict the allowed size, because even if
+ // the peer says it's okay to write 16MB data frames, we might
+ // want to write smaller ones to properly weight competing
+ // streams' priorities.
+
+ if len(wd.p) > int(allowed) {
+ wm.stream.flow.take(allowed)
+ chunk := wd.p[:allowed]
+ wd.p = wd.p[allowed:]
+ // Make up a new write message of a valid size, rather
+ // than shifting one off the queue.
+ return frameWriteMsg{
+ stream: wm.stream,
+ write: &writeData{
+ streamID: wd.streamID,
+ p: chunk,
+ // even if the original had endStream set, there
+ // are bytes remaining because len(wd.p) > allowed,
+ // so we know endStream is false:
+ endStream: false,
+ },
+ // our caller is blocking on the final DATA frame, not
+ // these intermediates, so no need to wait:
+ done: nil,
+ }, true
+ }
+ wm.stream.flow.take(int32(len(wd.p)))
+ }
+
+ q.shift()
+ if q.empty() {
+ ws.putEmptyQueue(q)
+ delete(ws.sq, id)
+ }
+ return wm, true
+}
+
+func (ws *writeScheduler) forgetStream(id uint32) {
+ q, ok := ws.sq[id]
+ if !ok {
+ return
+ }
+ delete(ws.sq, id)
+
+ // But keep it for others later.
+ for i := range q.s {
+ q.s[i] = frameWriteMsg{}
+ }
+ q.s = q.s[:0]
+ ws.putEmptyQueue(q)
+}
+
+type writeQueue struct {
+ s []frameWriteMsg
+}
+
+// streamID returns the stream ID for a non-empty stream-specific queue.
+func (q *writeQueue) streamID() uint32 { return q.s[0].stream.id }
+
+func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+
+func (q *writeQueue) push(wm frameWriteMsg) {
+ q.s = append(q.s, wm)
+}
+
+// head returns the next item that would be removed by shift.
+func (q *writeQueue) head() frameWriteMsg {
+ if len(q.s) == 0 {
+ panic("invalid use of queue")
+ }
+ return q.s[0]
+}
+
+func (q *writeQueue) shift() frameWriteMsg {
+ if len(q.s) == 0 {
+ panic("invalid use of queue")
+ }
+ wm := q.s[0]
+ // TODO: less copy-happy queue.
+ copy(q.s, q.s[1:])
+ q.s[len(q.s)-1] = frameWriteMsg{}
+ q.s = q.s[:len(q.s)-1]
+ return wm
+}
+
+func (q *writeQueue) firstIsNoCost() bool {
+ if df, ok := q.s[0].write.(*writeData); ok {
+ return len(df.p) == 0
+ }
+ return true
+}
diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
new file mode 100644
index 0000000..1119f34
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go
@@ -0,0 +1,525 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package timeseries implements a time series structure for stats collection.
+package timeseries // import "golang.org/x/net/internal/timeseries"
+
+import (
+ "fmt"
+ "log"
+ "time"
+)
+
+const (
+ timeSeriesNumBuckets = 64
+ minuteHourSeriesNumBuckets = 60
+)
+
+var timeSeriesResolutions = []time.Duration{
+ 1 * time.Second,
+ 10 * time.Second,
+ 1 * time.Minute,
+ 10 * time.Minute,
+ 1 * time.Hour,
+ 6 * time.Hour,
+ 24 * time.Hour, // 1 day
+ 7 * 24 * time.Hour, // 1 week
+ 4 * 7 * 24 * time.Hour, // 4 weeks
+ 16 * 7 * 24 * time.Hour, // 16 weeks
+}
+
+var minuteHourSeriesResolutions = []time.Duration{
+ 1 * time.Second,
+ 1 * time.Minute,
+}
+
+// An Observable is a kind of data that can be aggregated in a time series.
+type Observable interface {
+ Multiply(ratio float64) // Multiplies the data in self by a given ratio
+ Add(other Observable) // Adds the data from a different observation to self
+ Clear() // Clears the observation so it can be reused.
+ CopyFrom(other Observable) // Copies the contents of a given observation to self
+}
+
+// Float attaches the methods of Observable to a float64.
+type Float float64
+
+// NewFloat returns a Float.
+func NewFloat() Observable {
+ f := Float(0)
+ return &f
+}
+
+// String returns the float as a string.
+func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) }
+
+// Value returns the float's value.
+func (f *Float) Value() float64 { return float64(*f) }
+
+func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) }
+
+func (f *Float) Add(other Observable) {
+ o := other.(*Float)
+ *f += *o
+}
+
+func (f *Float) Clear() { *f = 0 }
+
+func (f *Float) CopyFrom(other Observable) {
+ o := other.(*Float)
+ *f = *o
+}
+
+// A Clock tells the current time.
+type Clock interface {
+ Time() time.Time
+}
+
+type defaultClock int
+
+var defaultClockInstance defaultClock
+
+func (defaultClock) Time() time.Time { return time.Now() }
+
+// Information kept per level. Each level consists of a circular list of
+// observations. The start of the level may be derived from end and
+// len(buckets) * size.
+type tsLevel struct {
+ oldest int // index to oldest bucketed Observable
+ newest int // index to newest bucketed Observable
+ end time.Time // end timestamp for this level
+ size time.Duration // duration of the bucketed Observable
+ buckets []Observable // collections of observations
+ provider func() Observable // used for creating new Observable
+}
+
+func (l *tsLevel) Clear() {
+ l.oldest = 0
+ l.newest = len(l.buckets) - 1
+ l.end = time.Time{}
+ for i := range l.buckets {
+ if l.buckets[i] != nil {
+ l.buckets[i].Clear()
+ l.buckets[i] = nil
+ }
+ }
+}
+
+func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) {
+ l.size = size
+ l.provider = f
+ l.buckets = make([]Observable, numBuckets)
+}
+
+// Keeps a sequence of levels. Each level is responsible for storing data at
+// a given resolution. For example, the first level stores data at a one
+// minute resolution while the second level stores data at a one hour
+// resolution.
+//
+// Each level is represented by a sequence of buckets. Each bucket spans an
+// interval equal to the resolution of the level. New observations are added
+// to the last bucket.
+type timeSeries struct {
+ provider func() Observable // make more Observable
+ numBuckets int // number of buckets in each level
+ levels []*tsLevel // levels of bucketed Observable
+ lastAdd time.Time // time of last Observable tracked
+ total Observable // convenient aggregation of all Observable
+ clock Clock // Clock for getting current time
+ pending Observable // observations not yet bucketed
+ pendingTime time.Time // what time are we keeping in pending
+ dirty bool // if there are pending observations
+}
+
+// init initializes the time series according to the supplied criteria.
+func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) {
+ ts.provider = f
+ ts.numBuckets = numBuckets
+ ts.clock = clock
+ ts.levels = make([]*tsLevel, len(resolutions))
+
+ for i := range resolutions {
+ if i > 0 && resolutions[i-1] >= resolutions[i] {
+ log.Print("timeseries: resolutions must be monotonically increasing")
+ break
+ }
+ newLevel := new(tsLevel)
+ newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider)
+ ts.levels[i] = newLevel
+ }
+
+ ts.Clear()
+}
+
+// Clear removes all observations from the time series.
+func (ts *timeSeries) Clear() {
+ ts.lastAdd = time.Time{}
+ ts.total = ts.resetObservation(ts.total)
+ ts.pending = ts.resetObservation(ts.pending)
+ ts.pendingTime = time.Time{}
+ ts.dirty = false
+
+ for i := range ts.levels {
+ ts.levels[i].Clear()
+ }
+}
+
+// Add records an observation at the current time.
+func (ts *timeSeries) Add(observation Observable) {
+ ts.AddWithTime(observation, ts.clock.Time())
+}
+
+// AddWithTime records an observation at the specified time.
+func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) {
+ smallBucketDuration := ts.levels[0].size
+
+ if t.After(ts.lastAdd) {
+ ts.lastAdd = t
+ }
+
+ if t.After(ts.pendingTime) {
+ ts.advance(t)
+ ts.mergePendingUpdates()
+ ts.pendingTime = ts.levels[0].end
+ ts.pending.CopyFrom(observation)
+ ts.dirty = true
+ } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) {
+ // The observation is close enough to go into the pending bucket.
+ // This compensates for clock skewing and small scheduling delays
+ // by letting the update stay in the fast path.
+ ts.pending.Add(observation)
+ ts.dirty = true
+ } else {
+ ts.mergeValue(observation, t)
+ }
+}
+
+// mergeValue inserts the observation at the specified time in the past into all levels.
+func (ts *timeSeries) mergeValue(observation Observable, t time.Time) {
+ for _, level := range ts.levels {
+ index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size)
+ if 0 <= index && index < ts.numBuckets {
+ bucketNumber := (level.oldest + index) % ts.numBuckets
+ if level.buckets[bucketNumber] == nil {
+ level.buckets[bucketNumber] = level.provider()
+ }
+ level.buckets[bucketNumber].Add(observation)
+ }
+ }
+ ts.total.Add(observation)
+}
+
+// mergePendingUpdates applies the pending updates into all levels.
+func (ts *timeSeries) mergePendingUpdates() {
+ if ts.dirty {
+ ts.mergeValue(ts.pending, ts.pendingTime)
+ ts.pending = ts.resetObservation(ts.pending)
+ ts.dirty = false
+ }
+}
+
+// advance cycles the buckets at each level until the latest bucket in
+// each level can hold the time specified.
+func (ts *timeSeries) advance(t time.Time) {
+ if !t.After(ts.levels[0].end) {
+ return
+ }
+ for i := 0; i < len(ts.levels); i++ {
+ level := ts.levels[i]
+ if !level.end.Before(t) {
+ break
+ }
+
+ // If the time is sufficiently far, just clear the level and advance
+ // directly.
+ if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) {
+ for _, b := range level.buckets {
+ ts.resetObservation(b)
+ }
+ level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds())
+ }
+
+ for t.After(level.end) {
+ level.end = level.end.Add(level.size)
+ level.newest = level.oldest
+ level.oldest = (level.oldest + 1) % ts.numBuckets
+ ts.resetObservation(level.buckets[level.newest])
+ }
+
+ t = level.end
+ }
+}
+
+// Latest returns the sum of the num latest buckets from the level.
+func (ts *timeSeries) Latest(level, num int) Observable {
+ now := ts.clock.Time()
+ if ts.levels[0].end.Before(now) {
+ ts.advance(now)
+ }
+
+ ts.mergePendingUpdates()
+
+ result := ts.provider()
+ l := ts.levels[level]
+ index := l.newest
+
+ for i := 0; i < num; i++ {
+ if l.buckets[index] != nil {
+ result.Add(l.buckets[index])
+ }
+ if index == 0 {
+ index = ts.numBuckets
+ }
+ index--
+ }
+
+ return result
+}
+
+// LatestBuckets returns a copy of the num latest buckets from level.
+func (ts *timeSeries) LatestBuckets(level, num int) []Observable {
+ if level < 0 || level >= len(ts.levels) { // level must index into ts.levels
+ log.Print("timeseries: bad level argument: ", level)
+ return nil
+ }
+ if num < 0 || num >= ts.numBuckets {
+ log.Print("timeseries: bad num argument: ", num)
+ return nil
+ }
+
+ results := make([]Observable, num)
+ now := ts.clock.Time()
+ if ts.levels[0].end.Before(now) {
+ ts.advance(now)
+ }
+
+ ts.mergePendingUpdates()
+
+ l := ts.levels[level]
+ index := l.newest
+
+ for i := 0; i < num; i++ {
+ result := ts.provider()
+ results[i] = result
+ if l.buckets[index] != nil {
+ result.CopyFrom(l.buckets[index])
+ }
+
+ if index == 0 {
+ index = ts.numBuckets
+ }
+ index--
+ }
+ return results
+}
+
+// ScaleBy updates observations by scaling by factor.
+func (ts *timeSeries) ScaleBy(factor float64) {
+ for _, l := range ts.levels {
+ for i := 0; i < ts.numBuckets; i++ {
+ l.buckets[i].Multiply(factor)
+ }
+ }
+
+ ts.total.Multiply(factor)
+ ts.pending.Multiply(factor)
+}
+
+// Range returns the sum of observations added over the specified time range.
+// If start or finish times don't fall on bucket boundaries of the same
+// level, then return values are approximate answers.
+func (ts *timeSeries) Range(start, finish time.Time) Observable {
+ return ts.ComputeRange(start, finish, 1)[0]
+}
+
+// Recent returns the sum of observations from the last delta.
+func (ts *timeSeries) Recent(delta time.Duration) Observable {
+ now := ts.clock.Time()
+ return ts.Range(now.Add(-delta), now)
+}
+
+// Total returns the total of all observations.
+func (ts *timeSeries) Total() Observable {
+ ts.mergePendingUpdates()
+ return ts.total
+}
+
+// ComputeRange computes a specified number of values into a slice using
+// the observations recorded over the specified time period. The return
+// values are approximate if the start or finish times don't fall on the
+// bucket boundaries at the same level or if the number of buckets spanning
+// the range is not an integral multiple of num.
+func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable {
+ if start.After(finish) {
+ log.Printf("timeseries: start > finish, %v>%v", start, finish)
+ return nil
+ }
+
+ if num < 0 {
+ log.Printf("timeseries: num < 0, %v", num)
+ return nil
+ }
+
+ results := make([]Observable, num)
+
+ for _, l := range ts.levels {
+ if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) {
+ ts.extract(l, start, finish, num, results)
+ return results
+ }
+ }
+
+ // Failed to find a level that covers the desired range. So just
+ // extract from the last level, even if it doesn't cover the entire
+ // desired range.
+ ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results)
+
+ return results
+}
+
+// RecentList returns the specified number of values in slice over the most
+// recent time period of the specified range.
+func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable {
+ if delta < 0 {
+ return nil
+ }
+ now := ts.clock.Time()
+ return ts.ComputeRange(now.Add(-delta), now, num)
+}
+
+// extract returns a slice of specified number of observations from a given
+// level over a given range.
+func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) {
+ ts.mergePendingUpdates()
+
+ srcInterval := l.size
+ dstInterval := finish.Sub(start) / time.Duration(num)
+ dstStart := start
+ srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets))
+
+ srcIndex := 0
+
+ // Where should scanning start?
+ if dstStart.After(srcStart) {
+ advance := dstStart.Sub(srcStart) / srcInterval
+ srcIndex += int(advance)
+ srcStart = srcStart.Add(advance * srcInterval)
+ }
+
+ // The i'th value is computed as shown below.
+ // interval = (finish - start) / num
+ // i'th value = sum of observation in range
+ // [ start + i * interval,
+ // start + (i + 1) * interval )
+ for i := 0; i < num; i++ {
+ results[i] = ts.resetObservation(results[i])
+ dstEnd := dstStart.Add(dstInterval)
+ for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) {
+ srcEnd := srcStart.Add(srcInterval)
+ if srcEnd.After(ts.lastAdd) {
+ srcEnd = ts.lastAdd
+ }
+
+ if !srcEnd.Before(dstStart) {
+ srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets]
+ if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) {
+ // dst completely contains src.
+ if srcValue != nil {
+ results[i].Add(srcValue)
+ }
+ } else {
+ // dst partially overlaps src.
+ overlapStart := maxTime(srcStart, dstStart)
+ overlapEnd := minTime(srcEnd, dstEnd)
+ base := srcEnd.Sub(srcStart)
+ fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds()
+
+ used := ts.provider()
+ if srcValue != nil {
+ used.CopyFrom(srcValue)
+ }
+ used.Multiply(fraction)
+ results[i].Add(used)
+ }
+
+ if srcEnd.After(dstEnd) {
+ break
+ }
+ }
+ srcIndex++
+ srcStart = srcStart.Add(srcInterval)
+ }
+ dstStart = dstStart.Add(dstInterval)
+ }
+}
+
+// resetObservation clears the content so the struct may be reused.
+func (ts *timeSeries) resetObservation(observation Observable) Observable {
+ if observation == nil {
+ observation = ts.provider()
+ } else {
+ observation.Clear()
+ }
+ return observation
+}
+
+// TimeSeries tracks data at granularities from 1 second to 16 weeks.
+type TimeSeries struct {
+ timeSeries
+}
+
+// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable.
+func NewTimeSeries(f func() Observable) *TimeSeries {
+ return NewTimeSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for
+// assigning timestamps.
+func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries {
+ ts := new(TimeSeries)
+ ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock)
+ return ts
+}
+
+// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour.
+type MinuteHourSeries struct {
+ timeSeries
+}
+
+// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable.
+func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries {
+ return NewMinuteHourSeriesWithClock(f, defaultClockInstance)
+}
+
+// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for
+// assigning timestamps.
+func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries {
+ ts := new(MinuteHourSeries)
+ ts.timeSeries.init(minuteHourSeriesResolutions, f,
+ minuteHourSeriesNumBuckets, clock)
+ return ts
+}
+
+func (ts *MinuteHourSeries) Minute() Observable {
+ return ts.timeSeries.Latest(0, 60)
+}
+
+func (ts *MinuteHourSeries) Hour() Observable {
+ return ts.timeSeries.Latest(1, 60)
+}
+
+func minTime(a, b time.Time) time.Time {
+ if a.Before(b) {
+ return a
+ }
+ return b
+}
+
+func maxTime(a, b time.Time) time.Time {
+ if a.After(b) {
+ return a
+ }
+ return b
+}
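A usage sketch for this package; the import path is under internal/, so code outside x/net cannot actually import it, and this is illustrative only. It records a Float observation and reads back the last minute's total:

package main

import (
    "fmt"
    "time"

    "golang.org/x/net/internal/timeseries"
)

func main() {
    ts := timeseries.NewTimeSeries(timeseries.NewFloat)

    obs := timeseries.Float(3)
    ts.Add(&obs) // record 3 units at the current time

    recent := ts.Recent(time.Minute).(*timeseries.Float)
    fmt.Println("last minute:", recent.Value())
}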
diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/lex/httplex/httplex.go
new file mode 100644
index 0000000..bd0ec24
--- /dev/null
+++ b/vendor/golang.org/x/net/lex/httplex/httplex.go
@@ -0,0 +1,312 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package httplex contains rules around lexical matters of various
+// HTTP-related specifications.
+//
+// This package is shared by the standard library (which vendors it)
+// and x/net/http2. It comes with no API stability promise.
+package httplex
+
+import (
+ "strings"
+ "unicode/utf8"
+)
+
+var isTokenTable = [127]bool{
+ '!': true,
+ '#': true,
+ '$': true,
+ '%': true,
+ '&': true,
+ '\'': true,
+ '*': true,
+ '+': true,
+ '-': true,
+ '.': true,
+ '0': true,
+ '1': true,
+ '2': true,
+ '3': true,
+ '4': true,
+ '5': true,
+ '6': true,
+ '7': true,
+ '8': true,
+ '9': true,
+ 'A': true,
+ 'B': true,
+ 'C': true,
+ 'D': true,
+ 'E': true,
+ 'F': true,
+ 'G': true,
+ 'H': true,
+ 'I': true,
+ 'J': true,
+ 'K': true,
+ 'L': true,
+ 'M': true,
+ 'N': true,
+ 'O': true,
+ 'P': true,
+ 'Q': true,
+ 'R': true,
+ 'S': true,
+ 'T': true,
+ 'U': true,
+ 'V': true,
+ 'W': true,
+ 'X': true,
+ 'Y': true,
+ 'Z': true,
+ '^': true,
+ '_': true,
+ '`': true,
+ 'a': true,
+ 'b': true,
+ 'c': true,
+ 'd': true,
+ 'e': true,
+ 'f': true,
+ 'g': true,
+ 'h': true,
+ 'i': true,
+ 'j': true,
+ 'k': true,
+ 'l': true,
+ 'm': true,
+ 'n': true,
+ 'o': true,
+ 'p': true,
+ 'q': true,
+ 'r': true,
+ 's': true,
+ 't': true,
+ 'u': true,
+ 'v': true,
+ 'w': true,
+ 'x': true,
+ 'y': true,
+ 'z': true,
+ '|': true,
+ '~': true,
+}
+
+func IsTokenRune(r rune) bool {
+ i := int(r)
+ return i < len(isTokenTable) && isTokenTable[i]
+}
+
+func isNotToken(r rune) bool {
+ return !IsTokenRune(r)
+}
+
+// HeaderValuesContainsToken reports whether any string in values
+// contains the provided token, ASCII case-insensitively.
+func HeaderValuesContainsToken(values []string, token string) bool {
+ for _, v := range values {
+ if headerValueContainsToken(v, token) {
+ return true
+ }
+ }
+ return false
+}
+
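A usage sketch, assuming the vendored import path above: matching is comma-aware and ASCII case-insensitive, so a compound header value still matches each of its tokens.

package main

import (
    "fmt"

    "golang.org/x/net/lex/httplex"
)

func main() {
    values := []string{"Gzip, chunked"}
    fmt.Println(httplex.HeaderValuesContainsToken(values, "gzip"))    // true
    fmt.Println(httplex.HeaderValuesContainsToken(values, "chunked")) // true
    fmt.Println(httplex.HeaderValuesContainsToken(values, "zip"))     // false
}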
+// isOWS reports whether b is an optional whitespace byte, as defined
+// by RFC 7230 section 3.2.3.
+func isOWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// trimOWS returns x with all optional whitespace removed from the
+// beginning and end.
+func trimOWS(x string) string {
+ // TODO: consider using strings.Trim(x, " \t") instead,
+ // if and when it's fast enough. See issue 10292.
+ // But this ASCII-only code will probably always beat UTF-8
+ // aware code.
+ for len(x) > 0 && isOWS(x[0]) {
+ x = x[1:]
+ }
+ for len(x) > 0 && isOWS(x[len(x)-1]) {
+ x = x[:len(x)-1]
+ }
+ return x
+}
+
+// headerValueContainsToken reports whether v (assumed to be a
+// 0#element, in the ABNF extension described in RFC 7230 section 7)
+// contains token amongst its comma-separated tokens, ASCII
+// case-insensitively.
+func headerValueContainsToken(v string, token string) bool {
+ v = trimOWS(v)
+ if comma := strings.IndexByte(v, ','); comma != -1 {
+ return tokenEqual(trimOWS(v[:comma]), token) || headerValueContainsToken(v[comma+1:], token)
+ }
+ return tokenEqual(v, token)
+}
+
+// lowerASCII returns the ASCII lowercase version of b.
+func lowerASCII(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// tokenEqual reports whether t1 and t2 are equal, ASCII case-insensitively.
+func tokenEqual(t1, t2 string) bool {
+ if len(t1) != len(t2) {
+ return false
+ }
+ for i, b := range t1 {
+ if b >= utf8.RuneSelf {
+ // No UTF-8 or non-ASCII allowed in tokens.
+ return false
+ }
+ if lowerASCII(byte(b)) != lowerASCII(t2[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// isLWS reports whether b is linear white space, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+// LWS = [CRLF] 1*( SP | HT )
+func isLWS(b byte) bool { return b == ' ' || b == '\t' }
+
+// isCTL reports whether b is a control byte, according
+// to http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2
+// CTL = <any US-ASCII control character
+// (octets 0 - 31) and DEL (127)>
+func isCTL(b byte) bool {
+ const del = 0x7f // a CTL
+ return b < ' ' || b == del
+}
+
+// ValidHeaderFieldName reports whether v is a valid HTTP/1.x header name.
+// HTTP/2 imposes the additional restriction that uppercase ASCII
+// letters are not allowed.
+//
+// RFC 7230 says:
+// header-field = field-name ":" OWS field-value OWS
+// field-name = token
+// token = 1*tchar
+// tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" / "+" / "-" / "." /
+// "^" / "_" / "`" / "|" / "~" / DIGIT / ALPHA
+func ValidHeaderFieldName(v string) bool {
+ if len(v) == 0 {
+ return false
+ }
+ for _, r := range v {
+ if !IsTokenRune(r) {
+ return false
+ }
+ }
+ return true
+}
+
+// ValidHostHeader reports whether h is a valid host header.
+func ValidHostHeader(h string) bool {
+ // The latest spec is actually this:
+ //
+ // http://tools.ietf.org/html/rfc7230#section-5.4
+ // Host = uri-host [ ":" port ]
+ //
+ // Where uri-host is:
+ // http://tools.ietf.org/html/rfc3986#section-3.2.2
+ //
+ // But we're going to be much more lenient for now and just
+ // search for any byte that's not a valid byte in any of those
+ // expressions.
+ for i := 0; i < len(h); i++ {
+ if !validHostByte[h[i]] {
+ return false
+ }
+ }
+ return true
+}
+
+// See the ValidHostHeader comment.
+var validHostByte = [256]bool{
+ '0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true, '7': true,
+ '8': true, '9': true,
+
+ 'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true, 'h': true,
+ 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true, 'o': true, 'p': true,
+ 'q': true, 'r': true, 's': true, 't': true, 'u': true, 'v': true, 'w': true, 'x': true,
+ 'y': true, 'z': true,
+
+ 'A': true, 'B': true, 'C': true, 'D': true, 'E': true, 'F': true, 'G': true, 'H': true,
+ 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true, 'O': true, 'P': true,
+ 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true, 'V': true, 'W': true, 'X': true,
+ 'Y': true, 'Z': true,
+
+ '!': true, // sub-delims
+ '$': true, // sub-delims
+ '%': true, // pct-encoded (and used in IPv6 zones)
+ '&': true, // sub-delims
+ '(': true, // sub-delims
+ ')': true, // sub-delims
+ '*': true, // sub-delims
+ '+': true, // sub-delims
+ ',': true, // sub-delims
+ '-': true, // unreserved
+ '.': true, // unreserved
+ ':': true, // IPv6address + Host expression's optional port
+ ';': true, // sub-delims
+ '=': true, // sub-delims
+ '[': true,
+ '\'': true, // sub-delims
+ ']': true,
+ '_': true, // unreserved
+ '~': true, // unreserved
+}
+
+// ValidHeaderFieldValue reports whether v is a valid "field-value" according to
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 :
+//
+// message-header = field-name ":" [ field-value ]
+// field-value = *( field-content | LWS )
+// field-content = <the OCTETs making up the field-value
+// and consisting of either *TEXT or combinations
+// of token, separators, and quoted-string>
+//
+// http://www.w3.org/Protocols/rfc2616/rfc2616-sec2.html#sec2.2 :
+//
+// TEXT = <any OCTET except CTLs,
+// but including LWS>
+// LWS = [CRLF] 1*( SP | HT )
+// CTL = <any US-ASCII control character
+// (octets 0 - 31) and DEL (127)>
+//
+// RFC 7230 says:
+// field-value = *( field-content / obs-fold )
+// obs-fold = N/A to http2, and deprecated
+// field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+// field-vchar = VCHAR / obs-text
+// obs-text = %x80-FF
+// VCHAR = "any visible [USASCII] character"
+//
+// http2 further says: "Similarly, HTTP/2 allows header field values
+// that are not valid. While most of the values that can be encoded
+// will not alter header field parsing, carriage return (CR, ASCII
+// 0xd), line feed (LF, ASCII 0xa), and the zero character (NUL, ASCII
+// 0x0) might be exploited by an attacker if they are translated
+// verbatim. Any request or response that contains a character not
+// permitted in a header field value MUST be treated as malformed
+// (Section 8.1.2.6). Valid characters are defined by the
+// field-content ABNF rule in Section 3.2 of [RFC7230]."
+//
+// This function does not (yet?) properly handle the rejection of
+// strings that begin or end with SP or HTAB.
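+//
+// Illustrative cases, given the rules above:
+//
+//	ValidHeaderFieldValue("gzip, deflate") // true
+//	ValidHeaderFieldValue("a\tb")          // true: HTAB is LWS
+//	ValidHeaderFieldValue("a\r\nb")        // false: CR and LF are CTLs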
+func ValidHeaderFieldValue(v string) bool {
+ for i := 0; i < len(v); i++ {
+ b := v[i]
+ if isCTL(b) && !isLWS(b) {
+ return false
+ }
+ }
+ return true
+}
diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go
new file mode 100644
index 0000000..e66c7e3
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/events.go
@@ -0,0 +1,524 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "io"
+ "log"
+ "net/http"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "text/tabwriter"
+ "time"
+)
+
+var eventsTmpl = template.Must(template.New("events").Funcs(template.FuncMap{
+ "elapsed": elapsed,
+ "trimSpace": strings.TrimSpace,
+}).Parse(eventsHTML))
+
+const maxEventsPerLog = 100
+
+type bucket struct {
+ MaxErrAge time.Duration
+ String string
+}
+
+var buckets = []bucket{
+ {0, "total"},
+ {10 * time.Second, "errs<10s"},
+ {1 * time.Minute, "errs<1m"},
+ {10 * time.Minute, "errs<10m"},
+ {1 * time.Hour, "errs<1h"},
+ {10 * time.Hour, "errs<10h"},
+ {24000 * time.Hour, "errors"},
+}
+
+// RenderEvents renders the HTML page typically served at /debug/events.
+// It does not do any auth checking; see AuthRequest for the default auth check
+// used by the handler registered on http.DefaultServeMux.
+// req may be nil.
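+//
+// A sketch of wiring it onto a non-default mux (hypothetical handler;
+// mux is assumed to exist):
+//
+//	mux.HandleFunc("/debug/events", func(w http.ResponseWriter, r *http.Request) {
+//		trace.RenderEvents(w, r, true /* sensitive */)
+//	})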
+func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
+ now := time.Now()
+ data := &struct {
+ Families []string // family names
+ Buckets []bucket
+ Counts [][]int // eventLog count per family/bucket
+
+ // Set when a bucket has been selected.
+ Family string
+ Bucket int
+ EventLogs eventLogs
+ Expanded bool
+ }{
+ Buckets: buckets,
+ }
+
+ data.Families = make([]string, 0, len(families))
+ famMu.RLock()
+ for name := range families {
+ data.Families = append(data.Families, name)
+ }
+ famMu.RUnlock()
+ sort.Strings(data.Families)
+
+ // Count the number of eventLogs in each family for each error age.
+ data.Counts = make([][]int, len(data.Families))
+ for i, name := range data.Families {
+ // TODO(sameer): move this loop under the family lock.
+ f := getEventFamily(name)
+ data.Counts[i] = make([]int, len(data.Buckets))
+ for j, b := range data.Buckets {
+ data.Counts[i][j] = f.Count(now, b.MaxErrAge)
+ }
+ }
+
+ if req != nil {
+ var ok bool
+ data.Family, data.Bucket, ok = parseEventsArgs(req)
+ if !ok {
+ // No-op
+ } else {
+ data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge)
+ }
+ if data.EventLogs != nil {
+ defer data.EventLogs.Free()
+ sort.Sort(data.EventLogs)
+ }
+ if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+ data.Expanded = exp
+ }
+ }
+
+ famMu.RLock()
+ defer famMu.RUnlock()
+ if err := eventsTmpl.Execute(w, data); err != nil {
+ log.Printf("net/trace: Failed executing template: %v", err)
+ }
+}
+
+func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) {
+ fam, bStr := req.FormValue("fam"), req.FormValue("b")
+ if fam == "" || bStr == "" {
+ return "", 0, false
+ }
+ b, err := strconv.Atoi(bStr)
+ if err != nil || b < 0 || b >= len(buckets) {
+ return "", 0, false
+ }
+ return fam, b, true
+}
+
+// An EventLog provides a log of events associated with a specific object.
+type EventLog interface {
+ // Printf formats its arguments with fmt.Sprintf and adds the
+ // result to the event log.
+ Printf(format string, a ...interface{})
+
+ // Errorf is like Printf, but it marks this event as an error.
+ Errorf(format string, a ...interface{})
+
+ // Finish declares that this event log is complete.
+ // The event log should not be used after calling this method.
+ Finish()
+}
+
+// NewEventLog returns a new EventLog with the specified family name
+// and title.
+func NewEventLog(family, title string) EventLog {
+ el := newEventLog()
+ el.ref()
+ el.Family, el.Title = family, title
+ el.Start = time.Now()
+ el.events = make([]logEntry, 0, maxEventsPerLog)
+ el.stack = make([]uintptr, 32)
+ n := runtime.Callers(2, el.stack)
+ el.stack = el.stack[:n]
+
+ getEventFamily(family).add(el)
+ return el
+}
+
+func (el *eventLog) Finish() {
+ getEventFamily(el.Family).remove(el)
+ el.unref() // matches ref in New
+}
+
+var (
+ famMu sync.RWMutex
+ families = make(map[string]*eventFamily) // family name => family
+)
+
+func getEventFamily(fam string) *eventFamily {
+ famMu.Lock()
+ defer famMu.Unlock()
+ f := families[fam]
+ if f == nil {
+ f = &eventFamily{}
+ families[fam] = f
+ }
+ return f
+}
+
+type eventFamily struct {
+ mu sync.RWMutex
+ eventLogs eventLogs
+}
+
+func (f *eventFamily) add(el *eventLog) {
+ f.mu.Lock()
+ f.eventLogs = append(f.eventLogs, el)
+ f.mu.Unlock()
+}
+
+func (f *eventFamily) remove(el *eventLog) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ for i, el0 := range f.eventLogs {
+ if el == el0 {
+ copy(f.eventLogs[i:], f.eventLogs[i+1:])
+ f.eventLogs = f.eventLogs[:len(f.eventLogs)-1]
+ return
+ }
+ }
+}
+
+func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) {
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+ for _, el := range f.eventLogs {
+ if el.hasRecentError(now, maxErrAge) {
+ n++
+ }
+ }
+ return
+}
+
+func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) {
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+ els = make(eventLogs, 0, len(f.eventLogs))
+ for _, el := range f.eventLogs {
+ if el.hasRecentError(now, maxErrAge) {
+ el.ref()
+ els = append(els, el)
+ }
+ }
+ return
+}
+
+type eventLogs []*eventLog
+
+// Free calls unref on each element of the list.
+func (els eventLogs) Free() {
+ for _, el := range els {
+ el.unref()
+ }
+}
+
+// eventLogs may be sorted in reverse chronological order.
+func (els eventLogs) Len() int { return len(els) }
+func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) }
+func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] }
+
+// A logEntry is a timestamped log entry in an event log.
+type logEntry struct {
+ When time.Time
+ Elapsed time.Duration // since previous event in log
+ NewDay bool // whether this event is on a different day to the previous event
+ What string
+ IsErr bool
+}
+
+// WhenString returns a string representation of the event's timestamp.
+// It will include the date if midnight was crossed.
+func (e logEntry) WhenString() string {
+ if e.NewDay {
+ return e.When.Format("2006/01/02 15:04:05.000000")
+ }
+ return e.When.Format("15:04:05.000000")
+}
+
+// An eventLog represents an active event log.
+type eventLog struct {
+ // Family is the top-level grouping of event logs to which this belongs.
+ Family string
+
+ // Title is the title of this event log.
+ Title string
+
+ // Timing information.
+ Start time.Time
+
+ // Call stack where this event log was created.
+ stack []uintptr
+
+ // Append-only sequence of events.
+ //
+ // TODO(sameer): change this to a ring buffer to avoid the array copy
+ // when we hit maxEventsPerLog.
+ mu sync.RWMutex
+ events []logEntry
+ LastErrorTime time.Time
+ discarded int
+
+ refs int32 // how many buckets this is in
+}
+
+func (el *eventLog) reset() {
+ // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+ el.Family = ""
+ el.Title = ""
+ el.Start = time.Time{}
+ el.stack = nil
+ el.events = nil
+ el.LastErrorTime = time.Time{}
+ el.discarded = 0
+ el.refs = 0
+}
+
+func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool {
+ if maxErrAge == 0 {
+ return true
+ }
+ el.mu.RLock()
+ defer el.mu.RUnlock()
+ return now.Sub(el.LastErrorTime) < maxErrAge
+}
+
+// delta returns the elapsed time since the last event or the log start,
+// and whether it spans midnight.
+// L >= el.mu
+func (el *eventLog) delta(t time.Time) (time.Duration, bool) {
+ if len(el.events) == 0 {
+ return t.Sub(el.Start), false
+ }
+ prev := el.events[len(el.events)-1].When
+ return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (el *eventLog) Printf(format string, a ...interface{}) {
+ el.printf(false, format, a...)
+}
+
+func (el *eventLog) Errorf(format string, a ...interface{}) {
+ el.printf(true, format, a...)
+}
+
+func (el *eventLog) printf(isErr bool, format string, a ...interface{}) {
+ e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)}
+ el.mu.Lock()
+ e.Elapsed, e.NewDay = el.delta(e.When)
+ if len(el.events) < maxEventsPerLog {
+ el.events = append(el.events, e)
+ } else {
+ // Discard the oldest event.
+ if el.discarded == 0 {
+ // el.discarded starts at two to count for the event it
+ // is replacing, plus the next one that we are about to
+ // drop.
+ el.discarded = 2
+ } else {
+ el.discarded++
+ }
+ // TODO(sameer): if this causes allocations on a critical path,
+ // change eventLog.What to be a fmt.Stringer, as in trace.go.
+ el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded)
+ // The timestamp of the discarded meta-event should be
+ // the time of the last event it is representing.
+ el.events[0].When = el.events[1].When
+ copy(el.events[1:], el.events[2:])
+ el.events[maxEventsPerLog-1] = e
+ }
+ if e.IsErr {
+ el.LastErrorTime = e.When
+ }
+ el.mu.Unlock()
+}
+
+func (el *eventLog) ref() {
+ atomic.AddInt32(&el.refs, 1)
+}
+
+func (el *eventLog) unref() {
+ if atomic.AddInt32(&el.refs, -1) == 0 {
+ freeEventLog(el)
+ }
+}
+
+func (el *eventLog) When() string {
+ return el.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (el *eventLog) ElapsedTime() string {
+ elapsed := time.Since(el.Start)
+ return fmt.Sprintf("%.6f", elapsed.Seconds())
+}
+
+func (el *eventLog) Stack() string {
+ buf := new(bytes.Buffer)
+ tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0)
+ printStackRecord(tw, el.stack)
+ tw.Flush()
+ return buf.String()
+}
+
+// printStackRecord prints the function + source line information
+// for a single stack trace.
+// Adapted from runtime/pprof/pprof.go.
+func printStackRecord(w io.Writer, stk []uintptr) {
+ for _, pc := range stk {
+ f := runtime.FuncForPC(pc)
+ if f == nil {
+ continue
+ }
+ file, line := f.FileLine(pc)
+ name := f.Name()
+ // Skip runtime.goexit and any other runtime.* frames.
+ if strings.HasPrefix(name, "runtime.") {
+ continue
+ }
+ fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line)
+ }
+}
+
+func (el *eventLog) Events() []logEntry {
+ el.mu.RLock()
+ defer el.mu.RUnlock()
+ return el.events
+}
+
+// freeEventLogs is a freelist of *eventLog
+var freeEventLogs = make(chan *eventLog, 1000)
+
+// newEventLog returns an event log ready to use.
+func newEventLog() *eventLog {
+ select {
+ case el := <-freeEventLogs:
+ return el
+ default:
+ return new(eventLog)
+ }
+}
+
+// freeEventLog adds el to freeEventLogs if there's room.
+// This is non-blocking.
+func freeEventLog(el *eventLog) {
+ el.reset()
+ select {
+ case freeEventLogs <- el:
+ default:
+ }
+}
+
+const eventsHTML = `
+<html>
+ <head>
+ <title>events</title>
+ </head>
+ <style type="text/css">
+ body {
+ font-family: sans-serif;
+ }
+ table#req-status td.family {
+ padding-right: 2em;
+ }
+ table#req-status td.active {
+ padding-right: 1em;
+ }
+ table#req-status td.empty {
+ color: #aaa;
+ }
+ table#reqs {
+ margin-top: 1em;
+ }
+ table#reqs tr.first {
+ {{if $.Expanded}}font-weight: bold;{{end}}
+ }
+ table#reqs td {
+ font-family: monospace;
+ }
+ table#reqs td.when {
+ text-align: right;
+ white-space: nowrap;
+ }
+ table#reqs td.elapsed {
+ padding: 0 0.5em;
+ text-align: right;
+ white-space: pre;
+ width: 10em;
+ }
+ address {
+ font-size: smaller;
+ margin-top: 5em;
+ }
+ </style>
+ <body>
+
+<h1>/debug/events</h1>
+
+<table id="req-status">
+ {{range $i, $fam := .Families}}
+ <tr>
+ <td class="family">{{$fam}}</td>
+
+ {{range $j, $bucket := $.Buckets}}
+ {{$n := index $.Counts $i $j}}
+ <td class="{{if not $bucket.MaxErrAge}}active{{end}}{{if not $n}}empty{{end}}">
+ {{if $n}}<a href="?fam={{$fam}}&b={{$j}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{$n}} {{$bucket.String}}]
+ {{if $n}}</a>{{end}}
+ </td>
+ {{end}}
+
+ </tr>{{end}}
+</table>
+
+{{if $.EventLogs}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}">{{end}}
+[Summary]{{if $.Expanded}}</a>{{end}}
+
+{{if not $.Expanded}}<a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">{{end}}
+[Expanded]{{if not $.Expanded}}</a>{{end}}
+
+<table id="reqs">
+ <tr><th>When</th><th>Elapsed</th></tr>
+ {{range $el := $.EventLogs}}
+ <tr class="first">
+ <td class="when">{{$el.When}}</td>
+ <td class="elapsed">{{$el.ElapsedTime}}</td>
+ <td>{{$el.Title}}
+ </tr>
+ {{if $.Expanded}}
+ <tr>
+ <td class="when"></td>
+ <td class="elapsed"></td>
+ <td><pre>{{$el.Stack|trimSpace}}</pre></td>
+ </tr>
+ {{range $el.Events}}
+ <tr>
+ <td class="when">{{.WhenString}}</td>
+ <td class="elapsed">{{elapsed .Elapsed}}</td>
+ <td>.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}</td>
+ </tr>
+ {{end}}
+ {{end}}
+ {{end}}
+</table>
+{{end}}
+ </body>
+</html>
+`
diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go
new file mode 100644
index 0000000..bb42aa5
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/histogram.go
@@ -0,0 +1,356 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+// This file implements histogramming for RPC statistics collection.
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "log"
+ "math"
+
+ "golang.org/x/net/internal/timeseries"
+)
+
+const (
+ bucketCount = 38
+)
+
+// histogram keeps counts of values in buckets that are spaced
+// out in powers of 2: 0-1, 2-3, 4-7...
+// histogram implements timeseries.Observable
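+//
+// As a worked example of the bucketing: the value 10 has bit length
+// log2(10) = 4, so getBucket(10) returns index 3, the bucket covering
+// values in [8, 16).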
+type histogram struct {
+ sum int64 // running total of measurements
+ sumOfSquares float64 // running total of squared measurements
+ buckets []int64 // bucketed values for histogram
+ value int // bucket index of the single value observed so far (optimization)
+ valueCount int64 // observations recorded for that single bucket; -1 once buckets is allocated
+}
+
+// addMeasurement records a value measurement observation to the histogram.
+func (h *histogram) addMeasurement(value int64) {
+ // TODO: assert invariant
+ h.sum += value
+ h.sumOfSquares += float64(value) * float64(value)
+
+ bucketIndex := getBucket(value)
+
+ if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) {
+ h.value = bucketIndex
+ h.valueCount++
+ } else {
+ h.allocateBuckets()
+ h.buckets[bucketIndex]++
+ }
+}
+
+func (h *histogram) allocateBuckets() {
+ if h.buckets == nil {
+ h.buckets = make([]int64, bucketCount)
+ h.buckets[h.value] = h.valueCount
+ h.value = 0
+ h.valueCount = -1
+ }
+}
+
+func log2(i int64) int {
+ n := 0
+ for ; i >= 0x100; i >>= 8 {
+ n += 8
+ }
+ for ; i > 0; i >>= 1 {
+ n += 1
+ }
+ return n
+}
+
+func getBucket(i int64) (index int) {
+ index = log2(i) - 1
+ if index < 0 {
+ index = 0
+ }
+ if index >= bucketCount {
+ index = bucketCount - 1
+ }
+ return
+}
+
+// total returns the number of recorded observations.
+func (h *histogram) total() (total int64) {
+ if h.valueCount >= 0 {
+ total = h.valueCount
+ }
+ for _, val := range h.buckets {
+ total += val
+ }
+ return
+}
+
+// average returns the average value of recorded observations.
+func (h *histogram) average() float64 {
+ t := h.total()
+ if t == 0 {
+ return 0
+ }
+ return float64(h.sum) / float64(t)
+}
+
+// variance returns the variance of recorded observations.
+func (h *histogram) variance() float64 {
+ t := float64(h.total())
+ if t == 0 {
+ return 0
+ }
+ s := float64(h.sum) / t
+ return h.sumOfSquares/t - s*s
+}
+
+// standardDeviation returns the standard deviation of recorded observations.
+func (h *histogram) standardDeviation() float64 {
+ return math.Sqrt(h.variance())
+}
+
+// percentileBoundary estimates the value that the given fraction of recorded
+// observations are less than.
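+//
+// A worked example, assuming the buckets slice is allocated: with
+// h.buckets[3] == 4 and no other observations, percentileBoundary(0.5)
+// computes percentOfTotal = 2; the running total first exceeds it in
+// bucket 3, so interpolation over [8, 16) yields 8 + round(0.5*8) = 12.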
+func (h *histogram) percentileBoundary(percentile float64) int64 {
+ total := h.total()
+
+ // Corner cases (make sure result is strictly less than Total())
+ if total == 0 {
+ return 0
+ } else if total == 1 {
+ return int64(h.average())
+ }
+
+ percentOfTotal := round(float64(total) * percentile)
+ var runningTotal int64
+
+ for i := range h.buckets {
+ value := h.buckets[i]
+ runningTotal += value
+ if runningTotal == percentOfTotal {
+ // We hit an exact bucket boundary. If the next bucket has data, it is a
+ // good estimate of the value. If the bucket is empty, we interpolate the
+ // midpoint between the next bucket's boundary and the next non-zero
+ // bucket. If the remaining buckets are all empty, then we use the
+ // boundary for the next bucket as the estimate.
+ j := uint8(i + 1)
+ min := bucketBoundary(j)
+ if runningTotal < total {
+ for h.buckets[j] == 0 {
+ j++
+ }
+ }
+ max := bucketBoundary(j)
+ return min + round(float64(max-min)/2)
+ } else if runningTotal > percentOfTotal {
+ // The value is in this bucket. Interpolate the value.
+ delta := runningTotal - percentOfTotal
+ percentBucket := float64(value-delta) / float64(value)
+ bucketMin := bucketBoundary(uint8(i))
+ nextBucketMin := bucketBoundary(uint8(i + 1))
+ bucketSize := nextBucketMin - bucketMin
+ return bucketMin + round(percentBucket*float64(bucketSize))
+ }
+ }
+ return bucketBoundary(bucketCount - 1)
+}
+
+// median returns the estimated median of the observed values.
+func (h *histogram) median() int64 {
+ return h.percentileBoundary(0.5)
+}
+
+// Add adds other to h.
+func (h *histogram) Add(other timeseries.Observable) {
+ o := other.(*histogram)
+ if o.valueCount == 0 {
+ // Other histogram is empty
+ } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value {
+ // Both have a single bucketed value, aggregate them
+ h.valueCount += o.valueCount
+ } else {
+ // Two different values necessitate buckets in this histogram
+ h.allocateBuckets()
+ if o.valueCount >= 0 {
+ h.buckets[o.value] += o.valueCount
+ } else {
+ for i := range h.buckets {
+ h.buckets[i] += o.buckets[i]
+ }
+ }
+ }
+ h.sumOfSquares += o.sumOfSquares
+ h.sum += o.sum
+}
+
+// Clear resets the histogram to an empty state, removing all observed values.
+func (h *histogram) Clear() {
+ h.buckets = nil
+ h.value = 0
+ h.valueCount = 0
+ h.sum = 0
+ h.sumOfSquares = 0
+}
+
+// CopyFrom copies from other, which must be a *histogram, into h.
+func (h *histogram) CopyFrom(other timeseries.Observable) {
+ o := other.(*histogram)
+ if o.valueCount == -1 {
+ h.allocateBuckets()
+ copy(h.buckets, o.buckets)
+ }
+ h.sum = o.sum
+ h.sumOfSquares = o.sumOfSquares
+ h.value = o.value
+ h.valueCount = o.valueCount
+}
+
+// Multiply scales the histogram by the specified ratio.
+func (h *histogram) Multiply(ratio float64) {
+ if h.valueCount == -1 {
+ for i := range h.buckets {
+ h.buckets[i] = int64(float64(h.buckets[i]) * ratio)
+ }
+ } else {
+ h.valueCount = int64(float64(h.valueCount) * ratio)
+ }
+ h.sum = int64(float64(h.sum) * ratio)
+ h.sumOfSquares = h.sumOfSquares * ratio
+}
+
+// New creates a new histogram.
+func (h *histogram) New() timeseries.Observable {
+ r := new(histogram)
+ r.Clear()
+ return r
+}
+
+func (h *histogram) String() string {
+ return fmt.Sprintf("%d, %f, %d, %d, %v",
+ h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets)
+}
+
+// round returns the closest int64 to the argument.
+func round(in float64) int64 {
+ return int64(math.Floor(in + 0.5))
+}
+
+// bucketBoundary returns the first value in the bucket.
+func bucketBoundary(bucket uint8) int64 {
+ if bucket == 0 {
+ return 0
+ }
+ return 1 << bucket
+}
+
+// bucketData holds data about a specific bucket for use in distTmpl.
+type bucketData struct {
+ Lower, Upper int64
+ N int64
+ Pct, CumulativePct float64
+ GraphWidth int
+}
+
+// data holds data about a Distribution for use in distTmpl.
+type data struct {
+ Buckets []*bucketData
+ Count, Median int64
+ Mean, StandardDeviation float64
+}
+
+// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets.
+const maxHTMLBarWidth = 350.0
+
+// newData returns data representing h for use in distTmpl.
+func (h *histogram) newData() *data {
+ // Force the allocation of buckets to simplify the rendering implementation
+ h.allocateBuckets()
+ // We scale the bars on the right so that the largest bar is
+ // maxHTMLBarWidth pixels in width.
+ maxBucket := int64(0)
+ for _, n := range h.buckets {
+ if n > maxBucket {
+ maxBucket = n
+ }
+ }
+ total := h.total()
+ barsizeMult := maxHTMLBarWidth / float64(maxBucket)
+ var pctMult float64
+ if total == 0 {
+ pctMult = 1.0
+ } else {
+ pctMult = 100.0 / float64(total)
+ }
+
+ buckets := make([]*bucketData, len(h.buckets))
+ runningTotal := int64(0)
+ for i, n := range h.buckets {
+ if n == 0 {
+ continue
+ }
+ runningTotal += n
+ var upperBound int64
+ if i < bucketCount-1 {
+ upperBound = bucketBoundary(uint8(i + 1))
+ } else {
+ upperBound = math.MaxInt64
+ }
+ buckets[i] = &bucketData{
+ Lower: bucketBoundary(uint8(i)),
+ Upper: upperBound,
+ N: n,
+ Pct: float64(n) * pctMult,
+ CumulativePct: float64(runningTotal) * pctMult,
+ GraphWidth: int(float64(n) * barsizeMult),
+ }
+ }
+ return &data{
+ Buckets: buckets,
+ Count: total,
+ Median: h.median(),
+ Mean: h.average(),
+ StandardDeviation: h.standardDeviation(),
+ }
+}
+
+func (h *histogram) html() template.HTML {
+ buf := new(bytes.Buffer)
+ if err := distTmpl.Execute(buf, h.newData()); err != nil {
+ buf.Reset()
+ log.Printf("net/trace: couldn't execute template: %v", err)
+ }
+ return template.HTML(buf.String())
+}
+
+// Input: data
+var distTmpl = template.Must(template.New("distTmpl").Parse(`
+<table>
+<tr>
+ <td style="padding:0.25em">Count: {{.Count}}</td>
+ <td style="padding:0.25em">Mean: {{printf "%.0f" .Mean}}</td>
+ <td style="padding:0.25em">StdDev: {{printf "%.0f" .StandardDeviation}}</td>
+ <td style="padding:0.25em">Median: {{.Median}}</td>
+</tr>
+</table>
+<hr>
+<table>
+{{range $b := .Buckets}}
+{{if $b}}
+ <tr>
+ <td style="padding:0 0 0 0.25em">[</td>
+ <td style="text-align:right;padding:0 0.25em">{{.Lower}},</td>
+ <td style="text-align:right;padding:0 0.25em">{{.Upper}})</td>
+ <td style="text-align:right;padding:0 0.25em">{{.N}}</td>
+ <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .Pct}}%</td>
+ <td style="text-align:right;padding:0 0.25em">{{printf "%#.3f" .CumulativePct}}%</td>
+ <td><div style="background-color: blue; height: 1em; width: {{.GraphWidth}}px;"></div></td>
+ </tr>
+{{end}}
+{{end}}
+</table>
+`))
diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go
new file mode 100644
index 0000000..d860fcc
--- /dev/null
+++ b/vendor/golang.org/x/net/trace/trace.go
@@ -0,0 +1,1063 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package trace implements tracing of requests and long-lived objects.
+It exports HTTP interfaces on /debug/requests and /debug/events.
+
+A trace.Trace provides tracing for short-lived objects, usually requests.
+A request handler might be implemented like this:
+
+ func fooHandler(w http.ResponseWriter, req *http.Request) {
+ tr := trace.New("mypkg.Foo", req.URL.Path)
+ defer tr.Finish()
+ ...
+ tr.LazyPrintf("some event %q happened", str)
+ ...
+ if err := somethingImportant(); err != nil {
+ tr.LazyPrintf("somethingImportant failed: %v", err)
+ tr.SetError()
+ }
+ }
+
+The /debug/requests HTTP endpoint organizes the traces by family,
+errors, and duration. It also provides a histogram of request duration
+for each family.
+
+A trace.EventLog provides tracing for long-lived objects, such as RPC
+connections.
+
+ // A Fetcher fetches URL paths for a single domain.
+ type Fetcher struct {
+ domain string
+ events trace.EventLog
+ }
+
+ func NewFetcher(domain string) *Fetcher {
+ return &Fetcher{
+ domain,
+ trace.NewEventLog("mypkg.Fetcher", domain),
+ }
+ }
+
+ func (f *Fetcher) Fetch(path string) (string, error) {
+ resp, err := http.Get("http://" + f.domain + "/" + path)
+ if err != nil {
+ f.events.Errorf("Get(%q) = %v", path, err)
+ return "", err
+ }
+ f.events.Printf("Get(%q) = %s", path, resp.Status)
+ ...
+ }
+
+ func (f *Fetcher) Close() error {
+ f.events.Finish()
+ return nil
+ }
+
+The /debug/events HTTP endpoint organizes the event logs by family and
+by time since the last error. The expanded view displays recent log
+entries and the log's call stack.
+*/
+package trace // import "golang.org/x/net/trace"
+
+import (
+ "bytes"
+ "fmt"
+ "html/template"
+ "io"
+ "log"
+ "net"
+ "net/http"
+ "runtime"
+ "sort"
+ "strconv"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/internal/timeseries"
+)
+
+// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing.
+// FOR DEBUGGING ONLY. This will slow down the program.
+var DebugUseAfterFinish = false
+
+// AuthRequest determines whether a specific request is permitted to load the
+// /debug/requests or /debug/events pages.
+//
+// It returns two bools; the first indicates whether the page may be viewed at all,
+// and the second indicates whether sensitive events will be shown.
+//
+// AuthRequest may be replaced by a program to customise its authorisation requirements.
+//
+// The default AuthRequest function returns (true, true) if and only if the request
+// comes from localhost/127.0.0.1/[::1].
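+//
+// A sketch of substituting a different policy (hypothetical; the subnet
+// prefix check is only an example):
+//
+//	trace.AuthRequest = func(req *http.Request) (any, sensitive bool) {
+//		host, _, err := net.SplitHostPort(req.RemoteAddr)
+//		if err != nil {
+//			host = req.RemoteAddr
+//		}
+//		ok := strings.HasPrefix(host, "10.")
+//		return ok, ok
+//	}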
+var AuthRequest = func(req *http.Request) (any, sensitive bool) {
+ // RemoteAddr is commonly in the form "IP" or "IP:port".
+ // If it is in the form "IP:port", split off the port.
+ host, _, err := net.SplitHostPort(req.RemoteAddr)
+ if err != nil {
+ host = req.RemoteAddr
+ }
+ switch host {
+ case "localhost", "127.0.0.1", "::1":
+ return true, true
+ default:
+ return false, false
+ }
+}
+
+func init() {
+ http.HandleFunc("/debug/requests", func(w http.ResponseWriter, req *http.Request) {
+ any, sensitive := AuthRequest(req)
+ if !any {
+ http.Error(w, "not allowed", http.StatusUnauthorized)
+ return
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ Render(w, req, sensitive)
+ })
+ http.HandleFunc("/debug/events", func(w http.ResponseWriter, req *http.Request) {
+ any, sensitive := AuthRequest(req)
+ if !any {
+ http.Error(w, "not allowed", http.StatusUnauthorized)
+ return
+ }
+ w.Header().Set("Content-Type", "text/html; charset=utf-8")
+ RenderEvents(w, req, sensitive)
+ })
+}
+
+// Render renders the HTML page typically served at /debug/requests.
+// It does not do any auth checking; see AuthRequest for the default auth check
+// used by the handler registered on http.DefaultServeMux.
+// req may be nil.
+func Render(w io.Writer, req *http.Request, sensitive bool) {
+ data := &struct {
+ Families []string
+ ActiveTraceCount map[string]int
+ CompletedTraces map[string]*family
+
+ // Set when a bucket has been selected.
+ Traces traceList
+ Family string
+ Bucket int
+ Expanded bool
+ Traced bool
+ Active bool
+ ShowSensitive bool // whether to show sensitive events
+
+ Histogram template.HTML
+ HistogramWindow string // e.g. "last minute", "last hour", "all time"
+
+ // If non-zero, the set of traces is a partial set,
+ // and this is the total number.
+ Total int
+ }{
+ CompletedTraces: completedTraces,
+ }
+
+ data.ShowSensitive = sensitive
+ if req != nil {
+ // Allow show_sensitive=0 to force hiding of sensitive data for testing.
+ // This only goes one way; you can't use show_sensitive=1 to see things.
+ if req.FormValue("show_sensitive") == "0" {
+ data.ShowSensitive = false
+ }
+
+ if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil {
+ data.Expanded = exp
+ }
+ if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil {
+ data.Traced = exp
+ }
+ }
+
+ completedMu.RLock()
+ data.Families = make([]string, 0, len(completedTraces))
+ for fam := range completedTraces {
+ data.Families = append(data.Families, fam)
+ }
+ completedMu.RUnlock()
+ sort.Strings(data.Families)
+
+ // We are careful here to minimize the time spent locking activeMu,
+ // since that lock is required every time an RPC starts and finishes.
+ data.ActiveTraceCount = make(map[string]int, len(data.Families))
+ activeMu.RLock()
+ for fam, s := range activeTraces {
+ data.ActiveTraceCount[fam] = s.Len()
+ }
+ activeMu.RUnlock()
+
+ var ok bool
+ data.Family, data.Bucket, ok = parseArgs(req)
+ switch {
+ case !ok:
+ // No-op
+ case data.Bucket == -1:
+ data.Active = true
+ n := data.ActiveTraceCount[data.Family]
+ data.Traces = getActiveTraces(data.Family)
+ if len(data.Traces) < n {
+ data.Total = n
+ }
+ case data.Bucket < bucketsPerFamily:
+ if b := lookupBucket(data.Family, data.Bucket); b != nil {
+ data.Traces = b.Copy(data.Traced)
+ }
+ default:
+ if f := getFamily(data.Family, false); f != nil {
+ var obs timeseries.Observable
+ f.LatencyMu.RLock()
+ switch o := data.Bucket - bucketsPerFamily; o {
+ case 0:
+ obs = f.Latency.Minute()
+ data.HistogramWindow = "last minute"
+ case 1:
+ obs = f.Latency.Hour()
+ data.HistogramWindow = "last hour"
+ case 2:
+ obs = f.Latency.Total()
+ data.HistogramWindow = "all time"
+ }
+ f.LatencyMu.RUnlock()
+ if obs != nil {
+ data.Histogram = obs.(*histogram).html()
+ }
+ }
+ }
+
+ if data.Traces != nil {
+ defer data.Traces.Free()
+ sort.Sort(data.Traces)
+ }
+
+ completedMu.RLock()
+ defer completedMu.RUnlock()
+ if err := pageTmpl.ExecuteTemplate(w, "Page", data); err != nil {
+ log.Printf("net/trace: Failed executing template: %v", err)
+ }
+}
+
+func parseArgs(req *http.Request) (fam string, b int, ok bool) {
+ if req == nil {
+ return "", 0, false
+ }
+ fam, bStr := req.FormValue("fam"), req.FormValue("b")
+ if fam == "" || bStr == "" {
+ return "", 0, false
+ }
+ b, err := strconv.Atoi(bStr)
+ if err != nil || b < -1 {
+ return "", 0, false
+ }
+
+ return fam, b, true
+}
+
+func lookupBucket(fam string, b int) *traceBucket {
+ f := getFamily(fam, false)
+ if f == nil || b < 0 || b >= len(f.Buckets) {
+ return nil
+ }
+ return f.Buckets[b]
+}
+
+type contextKeyT string
+
+var contextKey = contextKeyT("golang.org/x/net/trace.Trace")
+
+// NewContext returns a copy of the parent context
+// and associates it with a Trace.
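+//
+// A sketch of threading a trace through call boundaries (hypothetical
+// caller; ctx is assumed to be in hand):
+//
+//	tr := trace.New("mypkg.Foo", req.URL.Path)
+//	defer tr.Finish()
+//	ctx = trace.NewContext(ctx, tr)
+//	// ... in a callee that receives only ctx:
+//	if tr, ok := trace.FromContext(ctx); ok {
+//		tr.LazyPrintf("reached stage two")
+//	}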
+func NewContext(ctx context.Context, tr Trace) context.Context {
+ return context.WithValue(ctx, contextKey, tr)
+}
+
+// FromContext returns the Trace bound to the context, if any.
+func FromContext(ctx context.Context) (tr Trace, ok bool) {
+ tr, ok = ctx.Value(contextKey).(Trace)
+ return
+}
+
+// Trace represents an active request.
+type Trace interface {
+ // LazyLog adds x to the event log. It will be evaluated each time the
+ // /debug/requests page is rendered. Any memory referenced by x will be
+ // pinned until the trace is finished and later discarded.
+ LazyLog(x fmt.Stringer, sensitive bool)
+
+ // LazyPrintf evaluates its arguments with fmt.Sprintf each time the
+ // /debug/requests page is rendered. Any memory referenced by a will be
+ // pinned until the trace is finished and later discarded.
+ LazyPrintf(format string, a ...interface{})
+
+ // SetError declares that this trace resulted in an error.
+ SetError()
+
+ // SetRecycler sets a recycler for the trace.
+ // f will be called for each event passed to LazyLog at a time when
+ // it is no longer required, whether while the trace is still active
+ // and the event is discarded, or when a completed trace is discarded.
+ SetRecycler(f func(interface{}))
+
+ // SetTraceInfo sets the trace info for the trace.
+ // This is currently unused.
+ SetTraceInfo(traceID, spanID uint64)
+
+ // SetMaxEvents sets the maximum number of events that will be stored
+ // in the trace. This has no effect if any events have already been
+ // added to the trace.
+ SetMaxEvents(m int)
+
+ // Finish declares that this trace is complete.
+ // The trace should not be used after calling this method.
+ Finish()
+}
+
+type lazySprintf struct {
+ format string
+ a []interface{}
+}
+
+func (l *lazySprintf) String() string {
+ return fmt.Sprintf(l.format, l.a...)
+}
+
+// New returns a new Trace with the specified family and title.
+func New(family, title string) Trace {
+ tr := newTrace()
+ tr.ref()
+ tr.Family, tr.Title = family, title
+ tr.Start = time.Now()
+ tr.events = make([]event, 0, maxEventsPerTrace)
+
+ activeMu.RLock()
+ s := activeTraces[tr.Family]
+ activeMu.RUnlock()
+ if s == nil {
+ activeMu.Lock()
+ s = activeTraces[tr.Family] // check again
+ if s == nil {
+ s = new(traceSet)
+ activeTraces[tr.Family] = s
+ }
+ activeMu.Unlock()
+ }
+ s.Add(tr)
+
+ // Trigger allocation of the completed trace structure for this family.
+ // This will cause the family to be present in the request page during
+ // the first trace of this family. We don't care about the return value,
+ // nor is there any need for this to run inline, so we execute it in its
+ // own goroutine, but only if the family isn't allocated yet.
+ completedMu.RLock()
+ if _, ok := completedTraces[tr.Family]; !ok {
+ go allocFamily(tr.Family)
+ }
+ completedMu.RUnlock()
+
+ return tr
+}
+
+func (tr *trace) Finish() {
+ tr.Elapsed = time.Since(tr.Start)
+ if DebugUseAfterFinish {
+ buf := make([]byte, 4<<10) // 4 KB should be enough
+ n := runtime.Stack(buf, false)
+ tr.finishStack = buf[:n]
+ }
+
+ activeMu.RLock()
+ m := activeTraces[tr.Family]
+ activeMu.RUnlock()
+ m.Remove(tr)
+
+ f := getFamily(tr.Family, true)
+ for _, b := range f.Buckets {
+ if b.Cond.match(tr) {
+ b.Add(tr)
+ }
+ }
+ // Add a sample of elapsed time as microseconds to the family's timeseries
+ h := new(histogram)
+ h.addMeasurement(tr.Elapsed.Nanoseconds() / 1e3)
+ f.LatencyMu.Lock()
+ f.Latency.Add(h)
+ f.LatencyMu.Unlock()
+
+ tr.unref() // matches ref in New
+}
+
+const (
+ bucketsPerFamily = 9
+ tracesPerBucket = 10
+ maxActiveTraces = 20 // Maximum number of active traces to show.
+ maxEventsPerTrace = 10
+ numHistogramBuckets = 38
+)
+
+var (
+ // The active traces.
+ activeMu sync.RWMutex
+ activeTraces = make(map[string]*traceSet) // family -> traces
+
+ // Families of completed traces.
+ completedMu sync.RWMutex
+ completedTraces = make(map[string]*family) // family -> traces
+)
+
+type traceSet struct {
+ mu sync.RWMutex
+ m map[*trace]bool
+
+ // We could avoid the entire map scan in FirstN by having a slice of all the traces
+ // ordered by start time, and an index into that from the trace struct, with a periodic
+ // repack of the slice after enough traces finish; we could also use a skip list or similar.
+ // However, that would shift some of the expense from /debug/requests time to RPC time,
+ // which is probably the wrong trade-off.
+}
+
+func (ts *traceSet) Len() int {
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
+ return len(ts.m)
+}
+
+func (ts *traceSet) Add(tr *trace) {
+ ts.mu.Lock()
+ if ts.m == nil {
+ ts.m = make(map[*trace]bool)
+ }
+ ts.m[tr] = true
+ ts.mu.Unlock()
+}
+
+func (ts *traceSet) Remove(tr *trace) {
+ ts.mu.Lock()
+ delete(ts.m, tr)
+ ts.mu.Unlock()
+}
+
+// FirstN returns the first n traces ordered by time.
+func (ts *traceSet) FirstN(n int) traceList {
+ ts.mu.RLock()
+ defer ts.mu.RUnlock()
+
+ if n > len(ts.m) {
+ n = len(ts.m)
+ }
+ trl := make(traceList, 0, n)
+
+ // Fast path for when no selectivity is needed.
+ if n == len(ts.m) {
+ for tr := range ts.m {
+ tr.ref()
+ trl = append(trl, tr)
+ }
+ sort.Sort(trl)
+ return trl
+ }
+
+ // Pick the oldest n traces.
+ // This is inefficient. See the comment in the traceSet struct.
+ for tr := range ts.m {
+ // Put the first n traces into trl in the order they occur.
+ // When we have n, sort trl, and thereafter maintain its order.
+ if len(trl) < n {
+ tr.ref()
+ trl = append(trl, tr)
+ if len(trl) == n {
+ // This is guaranteed to happen exactly once during this loop.
+ sort.Sort(trl)
+ }
+ continue
+ }
+ if tr.Start.After(trl[n-1].Start) {
+ continue
+ }
+
+ // Find where to insert this one.
+ tr.ref()
+ i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) })
+ trl[n-1].unref()
+ copy(trl[i+1:], trl[i:])
+ trl[i] = tr
+ }
+
+ return trl
+}
+
+func getActiveTraces(fam string) traceList {
+ activeMu.RLock()
+ s := activeTraces[fam]
+ activeMu.RUnlock()
+ if s == nil {
+ return nil
+ }
+ return s.FirstN(maxActiveTraces)
+}
+
+func getFamily(fam string, allocNew bool) *family {
+ completedMu.RLock()
+ f := completedTraces[fam]
+ completedMu.RUnlock()
+ if f == nil && allocNew {
+ f = allocFamily(fam)
+ }
+ return f
+}
+
+func allocFamily(fam string) *family {
+ completedMu.Lock()
+ defer completedMu.Unlock()
+ f := completedTraces[fam]
+ if f == nil {
+ f = newFamily()
+ completedTraces[fam] = f
+ }
+ return f
+}
+
+// family represents a set of trace buckets and associated latency information.
+type family struct {
+ // traces may occur in multiple buckets.
+ Buckets [bucketsPerFamily]*traceBucket
+
+ // latency time series
+ LatencyMu sync.RWMutex
+ Latency *timeseries.MinuteHourSeries
+}
+
+func newFamily() *family {
+ return &family{
+ Buckets: [bucketsPerFamily]*traceBucket{
+ {Cond: minCond(0)},
+ {Cond: minCond(50 * time.Millisecond)},
+ {Cond: minCond(100 * time.Millisecond)},
+ {Cond: minCond(200 * time.Millisecond)},
+ {Cond: minCond(500 * time.Millisecond)},
+ {Cond: minCond(1 * time.Second)},
+ {Cond: minCond(10 * time.Second)},
+ {Cond: minCond(100 * time.Second)},
+ {Cond: errorCond{}},
+ },
+ Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }),
+ }
+}
+
+// traceBucket represents a size-capped bucket of historic traces,
+// along with a condition for a trace to belong to the bucket.
+type traceBucket struct {
+ Cond cond
+
+ // Ring buffer implementation of a fixed-size FIFO queue.
+ mu sync.RWMutex
+ buf [tracesPerBucket]*trace
+ start int // < tracesPerBucket
+ length int // <= tracesPerBucket
+}
+
+func (b *traceBucket) Add(tr *trace) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+
+ i := b.start + b.length
+ if i >= tracesPerBucket {
+ i -= tracesPerBucket
+ }
+ if b.length == tracesPerBucket {
+ // "Remove" an element from the bucket.
+ b.buf[i].unref()
+ b.start++
+ if b.start == tracesPerBucket {
+ b.start = 0
+ }
+ }
+ b.buf[i] = tr
+ if b.length < tracesPerBucket {
+ b.length++
+ }
+ tr.ref()
+}
+
+// Copy returns a copy of the traces in the bucket.
+// If tracedOnly is true, only the traces with trace information will be returned.
+// The logs will be ref'd before returning; the caller should call
+// the Free method when it is done with them.
+// TODO(dsymonds): keep track of traced requests in separate buckets.
+func (b *traceBucket) Copy(tracedOnly bool) traceList {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+
+ trl := make(traceList, 0, b.length)
+ for i, x := 0, b.start; i < b.length; i++ {
+ tr := b.buf[x]
+ if !tracedOnly || tr.spanID != 0 {
+ tr.ref()
+ trl = append(trl, tr)
+ }
+ x++
+ if x == len(b.buf) { // wrap around the ring buffer
+ x = 0
+ }
+ }
+ return trl
+}
+
+func (b *traceBucket) Empty() bool {
+ b.mu.RLock()
+ defer b.mu.RUnlock()
+ return b.length == 0
+}
+
+// cond represents a condition on a trace.
+type cond interface {
+ match(t *trace) bool
+ String() string
+}
+
+type minCond time.Duration
+
+func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) }
+func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) }
+
+type errorCond struct{}
+
+func (e errorCond) match(t *trace) bool { return t.IsError }
+func (e errorCond) String() string { return "errors" }
+
+type traceList []*trace
+
+// Free calls unref on each element of the list.
+func (trl traceList) Free() {
+ for _, t := range trl {
+ t.unref()
+ }
+}
+
+// traceList may be sorted in reverse chronological order.
+func (trl traceList) Len() int { return len(trl) }
+func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) }
+func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] }
+
+// An event is a timestamped log entry in a trace.
+type event struct {
+ When time.Time
+ Elapsed time.Duration // since previous event in trace
+ NewDay bool // whether this event is on a different day to the previous event
+ Recyclable bool // whether this event was passed via LazyLog
+ What interface{} // string or fmt.Stringer
+ Sensitive bool // whether this event contains sensitive information
+}
+
+// WhenString returns a string representation of the event's timestamp.
+// It will include the date if midnight was crossed.
+func (e event) WhenString() string {
+ if e.NewDay {
+ return e.When.Format("2006/01/02 15:04:05.000000")
+ }
+ return e.When.Format("15:04:05.000000")
+}
+
+// discarded represents a number of discarded events.
+// It is stored as *discarded to make it easier to update in-place.
+type discarded int
+
+func (d *discarded) String() string {
+ return fmt.Sprintf("(%d events discarded)", int(*d))
+}
+
+// trace represents an active or complete request,
+// either sent or received by this program.
+type trace struct {
+ // Family is the top-level grouping of traces to which this belongs.
+ Family string
+
+ // Title is the title of this trace.
+ Title string
+
+ // Timing information.
+ Start time.Time
+ Elapsed time.Duration // zero while active
+
+ // Trace information if non-zero.
+ traceID uint64
+ spanID uint64
+
+ // Whether this trace resulted in an error.
+ IsError bool
+
+ // Append-only sequence of events (modulo discards).
+ mu sync.RWMutex
+ events []event
+
+ refs int32 // how many buckets this is in
+ recycler func(interface{})
+ disc discarded // scratch space to avoid allocation
+
+ finishStack []byte // where finish was called, if DebugUseAfterFinish is set
+}
+
+func (tr *trace) reset() {
+ // Clear all but the mutex. Mutexes may not be copied, even when unlocked.
+ tr.Family = ""
+ tr.Title = ""
+ tr.Start = time.Time{}
+ tr.Elapsed = 0
+ tr.traceID = 0
+ tr.spanID = 0
+ tr.IsError = false
+ tr.events = nil
+ tr.refs = 0
+ tr.recycler = nil
+ tr.disc = 0
+ tr.finishStack = nil
+}
+
+// delta returns the elapsed time since the last event or the trace start,
+// and whether it spans midnight.
+// L >= tr.mu
+func (tr *trace) delta(t time.Time) (time.Duration, bool) {
+ if len(tr.events) == 0 {
+ return t.Sub(tr.Start), false
+ }
+ prev := tr.events[len(tr.events)-1].When
+ return t.Sub(prev), prev.Day() != t.Day()
+}
+
+func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) {
+ if DebugUseAfterFinish && tr.finishStack != nil {
+ buf := make([]byte, 4<<10) // 4 KB should be enough
+ n := runtime.Stack(buf, false)
+ log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n])
+ }
+
+ /*
+ NOTE TO DEBUGGERS
+
+ If you are here because your program panicked in this code,
+ it is almost definitely the fault of code using this package,
+ and very unlikely to be the fault of this code.
+
+ The most likely scenario is that some code elsewhere is using
+ a trace.Trace after its Finish method is called.
+ You can temporarily set the DebugUseAfterFinish var
+ to help discover where that is; do not leave that var set,
+ since it makes this package much less efficient.
+ */
+
+ e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive}
+ tr.mu.Lock()
+ e.Elapsed, e.NewDay = tr.delta(e.When)
+ if len(tr.events) < cap(tr.events) {
+ tr.events = append(tr.events, e)
+ } else {
+ // Discard the middle events.
+ di := (cap(tr.events) - 1) / 2
+ if d, ok := tr.events[di].What.(*discarded); ok {
+ (*d)++
+ } else {
+ // disc starts at two to count for the event it is replacing,
+ // plus the next one that we are about to drop.
+ tr.disc = 2
+ if tr.recycler != nil && tr.events[di].Recyclable {
+ go tr.recycler(tr.events[di].What)
+ }
+ tr.events[di].What = &tr.disc
+ }
+ // The timestamp of the discarded meta-event should be
+ // the time of the last event it is representing.
+ tr.events[di].When = tr.events[di+1].When
+
+ if tr.recycler != nil && tr.events[di+1].Recyclable {
+ go tr.recycler(tr.events[di+1].What)
+ }
+ copy(tr.events[di+1:], tr.events[di+2:])
+ tr.events[cap(tr.events)-1] = e
+ }
+ tr.mu.Unlock()
+}
+
+func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) {
+ tr.addEvent(x, true, sensitive)
+}
+
+func (tr *trace) LazyPrintf(format string, a ...interface{}) {
+ tr.addEvent(&lazySprintf{format, a}, false, false)
+}
+
+func (tr *trace) SetError() { tr.IsError = true }
+
+func (tr *trace) SetRecycler(f func(interface{})) {
+ tr.recycler = f
+}
+
+func (tr *trace) SetTraceInfo(traceID, spanID uint64) {
+ tr.traceID, tr.spanID = traceID, spanID
+}
+
+func (tr *trace) SetMaxEvents(m int) {
+ // Always keep at least three events: first, discarded count, last.
+ if len(tr.events) == 0 && m > 3 {
+ tr.events = make([]event, 0, m)
+ }
+}
+
+func (tr *trace) ref() {
+ atomic.AddInt32(&tr.refs, 1)
+}
+
+func (tr *trace) unref() {
+ if atomic.AddInt32(&tr.refs, -1) == 0 {
+ if tr.recycler != nil {
+ // freeTrace clears tr, so we hold tr.recycler and tr.events here.
+ go func(f func(interface{}), es []event) {
+ for _, e := range es {
+ if e.Recyclable {
+ f(e.What)
+ }
+ }
+ }(tr.recycler, tr.events)
+ }
+
+ freeTrace(tr)
+ }
+}
+
+func (tr *trace) When() string {
+ return tr.Start.Format("2006/01/02 15:04:05.000000")
+}
+
+func (tr *trace) ElapsedTime() string {
+ t := tr.Elapsed
+ if t == 0 {
+ // Active trace.
+ t = time.Since(tr.Start)
+ }
+ return fmt.Sprintf("%.6f", t.Seconds())
+}
+
+func (tr *trace) Events() []event {
+ tr.mu.RLock()
+ defer tr.mu.RUnlock()
+ return tr.events
+}
+
+var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool?
+
+// newTrace returns a trace ready to use.
+func newTrace() *trace {
+ select {
+ case tr := <-traceFreeList:
+ return tr
+ default:
+ return new(trace)
+ }
+}
+
+// freeTrace adds tr to traceFreeList if there's room.
+// This is non-blocking.
+func freeTrace(tr *trace) {
+ if DebugUseAfterFinish {
+ return // never reuse
+ }
+ tr.reset()
+ select {
+ case traceFreeList <- tr:
+ default:
+ }
+}
+
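+// elapsed formats d in seconds with microsecond precision. For
+// sub-second durations it blanks the leading zeros so that columns of
+// durations line up; e.g. 0.000123 seconds renders as " .   123".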
+func elapsed(d time.Duration) string {
+ b := []byte(fmt.Sprintf("%.6f", d.Seconds()))
+
+ // For subsecond durations, blank all zeros before decimal point,
+ // and all zeros between the decimal point and the first non-zero digit.
+ if d < time.Second {
+ dot := bytes.IndexByte(b, '.')
+ for i := 0; i < dot; i++ {
+ b[i] = ' '
+ }
+ for i := dot + 1; i < len(b); i++ {
+ if b[i] == '0' {
+ b[i] = ' '
+ } else {
+ break
+ }
+ }
+ }
+
+ return string(b)
+}
+
+var pageTmpl = template.Must(template.New("Page").Funcs(template.FuncMap{
+ "elapsed": elapsed,
+ "add": func(a, b int) int { return a + b },
+}).Parse(pageHTML))
+
+const pageHTML = `
+{{template "Prolog" .}}
+{{template "StatusTable" .}}
+{{template "Epilog" .}}
+
+{{define "Prolog"}}
+<html>
+ <head>
+ <title>/debug/requests</title>
+ <style type="text/css">
+ body {
+ font-family: sans-serif;
+ }
+ table#tr-status td.family {
+ padding-right: 2em;
+ }
+ table#tr-status td.active {
+ padding-right: 1em;
+ }
+ table#tr-status td.latency-first {
+ padding-left: 1em;
+ }
+ table#tr-status td.empty {
+ color: #aaa;
+ }
+ table#reqs {
+ margin-top: 1em;
+ }
+ table#reqs tr.first {
+ {{if $.Expanded}}font-weight: bold;{{end}}
+ }
+ table#reqs td {
+ font-family: monospace;
+ }
+ table#reqs td.when {
+ text-align: right;
+ white-space: nowrap;
+ }
+ table#reqs td.elapsed {
+ padding: 0 0.5em;
+ text-align: right;
+ white-space: pre;
+ width: 10em;
+ }
+ address {
+ font-size: smaller;
+ margin-top: 5em;
+ }
+ </style>
+ </head>
+ <body>
+
+<h1>/debug/requests</h1>
+{{end}} {{/* end of Prolog */}}
+
+{{define "StatusTable"}}
+<table id="tr-status">
+ {{range $fam := .Families}}
+ <tr>
+ <td class="family">{{$fam}}</td>
+
+ {{$n := index $.ActiveTraceCount $fam}}
+ <td class="active {{if not $n}}empty{{end}}">
+ {{if $n}}<a href="?fam={{$fam}}&b=-1{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{$n}} active]
+ {{if $n}}</a>{{end}}
+ </td>
+
+ {{$f := index $.CompletedTraces $fam}}
+ {{range $i, $b := $f.Buckets}}
+ {{$empty := $b.Empty}}
+ <td {{if $empty}}class="empty"{{end}}>
+ {{if not $empty}}<a href="?fam={{$fam}}&b={{$i}}{{if $.Expanded}}&exp=1{{end}}">{{end}}
+ [{{.Cond}}]
+ {{if not $empty}}</a>{{end}}
+ </td>
+ {{end}}
+
+ {{$nb := len $f.Buckets}}
+ <td class="latency-first">
+ <a href="?fam={{$fam}}&b={{$nb}}">[minute]</a>
+ </td>
+ <td>
+ <a href="?fam={{$fam}}&b={{add $nb 1}}">[hour]</a>
+ </td>
+ <td>
+ <a href="?fam={{$fam}}&b={{add $nb 2}}">[total]</a>
+ </td>
+
+ </tr>
+ {{end}}
+</table>
+{{end}} {{/* end of StatusTable */}}
+
+{{define "Epilog"}}
+{{if $.Traces}}
+<hr />
+<h3>Family: {{$.Family}}</h3>
+
+{{if or $.Expanded $.Traced}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}">[Normal/Summary]</a>
+{{else}}
+ [Normal/Summary]
+{{end}}
+
+{{if or (not $.Expanded) $.Traced}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1">[Normal/Expanded]</a>
+{{else}}
+ [Normal/Expanded]
+{{end}}
+
+{{if not $.Active}}
+ {{if or $.Expanded (not $.Traced)}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&rtraced=1">[Traced/Summary]</a>
+ {{else}}
+ [Traced/Summary]
+ {{end}}
+ {{if or (not $.Expanded) (not $.Traced)}}
+ <a href="?fam={{$.Family}}&b={{$.Bucket}}&exp=1&rtraced=1">[Traced/Expanded]</a>
+ {{else}}
+ [Traced/Expanded]
+ {{end}}
+{{end}}
+
+{{if $.Total}}
+<p><em>Showing <b>{{len $.Traces}}</b> of <b>{{$.Total}}</b> traces.</em></p>
+{{end}}
+
+<table id="reqs">
+ <caption>
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests
+ </caption>
+ <tr><th>When</th><th>Elapsed&nbsp;(s)</th></tr>
+ {{range $tr := $.Traces}}
+ <tr class="first">
+ <td class="when">{{$tr.When}}</td>
+ <td class="elapsed">{{$tr.ElapsedTime}}</td>
+ <td>{{$tr.Title}}</td>
+ {{/* TODO: include traceID/spanID */}}
+ </tr>
+ {{if $.Expanded}}
+ {{range $tr.Events}}
+ <tr>
+ <td class="when">{{.WhenString}}</td>
+ <td class="elapsed">{{elapsed .Elapsed}}</td>
+ <td>{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}<em>[redacted]</em>{{end}}</td>
+ </tr>
+ {{end}}
+ {{end}}
+ {{end}}
+</table>
+{{end}} {{/* if $.Traces */}}
+
+{{if $.Histogram}}
+<h4>Latency (&micro;s) of {{$.Family}} over {{$.HistogramWindow}}</h4>
+{{$.Histogram}}
+{{end}} {{/* if $.Histogram */}}
+
+ </body>
+</html>
+{{end}} {{/* end of Epilog */}}
+`
diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json
new file mode 100644
index 0000000..bcde291
--- /dev/null
+++ b/vendor/google.golang.org/api/storage/v1/storage-api.json
@@ -0,0 +1,2861 @@
+{
+ "kind": "discovery#restDescription",
+ "etag": "\"jQLIOHBVnDZie4rQHGH1WJF-INE/HunW8kZz70Rw8UgbEglfvbYNW8k\"",
+ "discoveryVersion": "v1",
+ "id": "storage:v1",
+ "name": "storage",
+ "version": "v1",
+ "revision": "20160504",
+ "title": "Cloud Storage JSON API",
+ "description": "Stores and retrieves potentially large, immutable data objects.",
+ "ownerDomain": "google.com",
+ "ownerName": "Google",
+ "icons": {
+ "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png",
+ "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png"
+ },
+ "documentationLink": "https://developers.google.com/storage/docs/json_api/",
+ "labels": [
+ "labs"
+ ],
+ "protocol": "rest",
+ "baseUrl": "https://www.googleapis.com/storage/v1/",
+ "basePath": "/storage/v1/",
+ "rootUrl": "https://www.googleapis.com/",
+ "servicePath": "storage/v1/",
+ "batchPath": "batch",
+ "parameters": {
+ "alt": {
+ "type": "string",
+ "description": "Data format for the response.",
+ "default": "json",
+ "enum": [
+ "json"
+ ],
+ "enumDescriptions": [
+ "Responses with Content-Type of application/json"
+ ],
+ "location": "query"
+ },
+ "fields": {
+ "type": "string",
+ "description": "Selector specifying which fields to include in a partial response.",
+ "location": "query"
+ },
+ "key": {
+ "type": "string",
+ "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+ "location": "query"
+ },
+ "oauth_token": {
+ "type": "string",
+ "description": "OAuth 2.0 token for the current user.",
+ "location": "query"
+ },
+ "prettyPrint": {
+ "type": "boolean",
+ "description": "Returns response with indentations and line breaks.",
+ "default": "true",
+ "location": "query"
+ },
+ "quotaUser": {
+ "type": "string",
+ "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.",
+ "location": "query"
+ },
+ "userIp": {
+ "type": "string",
+ "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.",
+ "location": "query"
+ }
+ },
+ "auth": {
+ "oauth2": {
+ "scopes": {
+ "https://www.googleapis.com/auth/cloud-platform": {
+ "description": "View and manage your data across Google Cloud Platform services"
+ },
+ "https://www.googleapis.com/auth/cloud-platform.read-only": {
+ "description": "View your data across Google Cloud Platform services"
+ },
+ "https://www.googleapis.com/auth/devstorage.full_control": {
+ "description": "Manage your data and permissions in Google Cloud Storage"
+ },
+ "https://www.googleapis.com/auth/devstorage.read_only": {
+ "description": "View your data in Google Cloud Storage"
+ },
+ "https://www.googleapis.com/auth/devstorage.read_write": {
+ "description": "Manage your data in Google Cloud Storage"
+ }
+ }
+ }
+ },
+ "schemas": {
+ "Bucket": {
+ "id": "Bucket",
+ "type": "object",
+ "description": "A bucket.",
+ "properties": {
+ "acl": {
+ "type": "array",
+ "description": "Access controls on the bucket.",
+ "items": {
+ "$ref": "BucketAccessControl"
+ },
+ "annotations": {
+ "required": [
+ "storage.buckets.update"
+ ]
+ }
+ },
+ "cors": {
+ "type": "array",
+ "description": "The bucket's Cross-Origin Resource Sharing (CORS) configuration.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "maxAgeSeconds": {
+ "type": "integer",
+ "description": "The value, in seconds, to return in the Access-Control-Max-Age header used in preflight responses.",
+ "format": "int32"
+ },
+ "method": {
+ "type": "array",
+ "description": "The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: \"*\" is permitted in the list of methods, and means \"any method\".",
+ "items": {
+ "type": "string"
+ }
+ },
+ "origin": {
+ "type": "array",
+ "description": "The list of Origins eligible to receive CORS response headers. Note: \"*\" is permitted in the list of origins, and means \"any Origin\".",
+ "items": {
+ "type": "string"
+ }
+ },
+ "responseHeader": {
+ "type": "array",
+ "description": "The list of HTTP headers other than the simple response headers to give permission for the user-agent to share across domains.",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "defaultObjectAcl": {
+ "type": "array",
+ "description": "Default access controls to apply to new objects when no ACL is provided.",
+ "items": {
+ "$ref": "ObjectAccessControl"
+ }
+ },
+ "etag": {
+ "type": "string",
+ "description": "HTTP 1.1 Entity tag for the bucket."
+ },
+ "id": {
+ "type": "string",
+ "description": "The ID of the bucket."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For buckets, this is always storage#bucket.",
+ "default": "storage#bucket"
+ },
+ "lifecycle": {
+ "type": "object",
+ "description": "The bucket's lifecycle configuration. See lifecycle management for more information.",
+ "properties": {
+ "rule": {
+ "type": "array",
+ "description": "A lifecycle management rule, which is made of an action to take and the condition(s) under which the action will be taken.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "action": {
+ "type": "object",
+ "description": "The action to take.",
+ "properties": {
+ "type": {
+ "type": "string",
+ "description": "Type of the action. Currently, only Delete is supported."
+ }
+ }
+ },
+ "condition": {
+ "type": "object",
+ "description": "The condition(s) under which the action will be taken.",
+ "properties": {
+ "age": {
+ "type": "integer",
+ "description": "Age of an object (in days). This condition is satisfied when an object reaches the specified age.",
+ "format": "int32"
+ },
+ "createdBefore": {
+ "type": "string",
+ "description": "A date in RFC 3339 format with only the date part (for instance, \"2013-01-15\"). This condition is satisfied when an object is created before midnight of the specified date in UTC.",
+ "format": "date"
+ },
+ "isLive": {
+ "type": "boolean",
+ "description": "Relevant only for versioned objects. If the value is true, this condition matches live objects; if the value is false, it matches archived objects."
+ },
+ "numNewerVersions": {
+ "type": "integer",
+ "description": "Relevant only for versioned objects. If the value is N, this condition is satisfied when there are at least N versions (including the live version) newer than this version of the object.",
+ "format": "int32"
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ },
+ "location": {
+ "type": "string",
+ "description": "The location of the bucket. Object data for objects in the bucket resides in physical storage within this region. Defaults to US. See the developer's guide for the authoritative list."
+ },
+ "logging": {
+ "type": "object",
+ "description": "The bucket's logging configuration, which defines the destination bucket and optional name prefix for the current bucket's logs.",
+ "properties": {
+ "logBucket": {
+ "type": "string",
+ "description": "The destination bucket where the current bucket's logs should be placed."
+ },
+ "logObjectPrefix": {
+ "type": "string",
+ "description": "A prefix for log object names."
+ }
+ }
+ },
+ "metageneration": {
+ "type": "string",
+ "description": "The metadata generation of this bucket.",
+ "format": "int64"
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of the bucket.",
+ "annotations": {
+ "required": [
+ "storage.buckets.insert"
+ ]
+ }
+ },
+ "owner": {
+ "type": "object",
+ "description": "The owner of the bucket. This is always the project team's owner group.",
+ "properties": {
+ "entity": {
+ "type": "string",
+ "description": "The entity, in the form project-owner-projectId."
+ },
+ "entityId": {
+ "type": "string",
+ "description": "The ID for the entity."
+ }
+ }
+ },
+ "projectNumber": {
+ "type": "string",
+ "description": "The project number of the project the bucket belongs to.",
+ "format": "uint64"
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "The URI of this bucket."
+ },
+ "storageClass": {
+ "type": "string",
+ "description": "The bucket's storage class. This defines how objects in the bucket are stored and determines the SLA and the cost of storage. Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY. Defaults to STANDARD. For more information, see storage classes."
+ },
+ "timeCreated": {
+ "type": "string",
+ "description": "The creation time of the bucket in RFC 3339 format.",
+ "format": "date-time"
+ },
+ "updated": {
+ "type": "string",
+ "description": "The modification time of the bucket in RFC 3339 format.",
+ "format": "date-time"
+ },
+ "versioning": {
+ "type": "object",
+ "description": "The bucket's versioning configuration.",
+ "properties": {
+ "enabled": {
+ "type": "boolean",
+ "description": "While set to true, versioning is fully enabled for this bucket."
+ }
+ }
+ },
+ "website": {
+ "type": "object",
+ "description": "The bucket's website configuration.",
+ "properties": {
+ "mainPageSuffix": {
+ "type": "string",
+ "description": "Behaves as the bucket's directory index where missing objects are treated as potential directories."
+ },
+ "notFoundPage": {
+ "type": "string",
+ "description": "The custom object to return when a requested resource is not found."
+ }
+ }
+ }
+ }
+ },
+ "BucketAccessControl": {
+ "id": "BucketAccessControl",
+ "type": "object",
+ "description": "An access-control entry.",
+ "properties": {
+ "bucket": {
+ "type": "string",
+ "description": "The name of the bucket."
+ },
+ "domain": {
+ "type": "string",
+ "description": "The domain associated with the entity, if any."
+ },
+ "email": {
+ "type": "string",
+ "description": "The email address associated with the entity, if any."
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com.",
+ "annotations": {
+ "required": [
+ "storage.bucketAccessControls.insert"
+ ]
+ }
+ },
+ "entityId": {
+ "type": "string",
+ "description": "The ID for the entity, if any."
+ },
+ "etag": {
+ "type": "string",
+ "description": "HTTP 1.1 Entity tag for the access-control entry."
+ },
+ "id": {
+ "type": "string",
+ "description": "The ID of the access-control entry."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For bucket access control entries, this is always storage#bucketAccessControl.",
+ "default": "storage#bucketAccessControl"
+ },
+ "projectTeam": {
+ "type": "object",
+ "description": "The project team associated with the entity, if any.",
+ "properties": {
+ "projectNumber": {
+ "type": "string",
+ "description": "The project number."
+ },
+ "team": {
+ "type": "string",
+ "description": "The team. Can be owners, editors, or viewers."
+ }
+ }
+ },
+ "role": {
+ "type": "string",
+ "description": "The access permission for the entity. Can be READER, WRITER, or OWNER.",
+ "annotations": {
+ "required": [
+ "storage.bucketAccessControls.insert"
+ ]
+ }
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "The link to this access-control entry."
+ }
+ }
+ },
+ "BucketAccessControls": {
+ "id": "BucketAccessControls",
+ "type": "object",
+ "description": "An access-control list.",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "The list of items.",
+ "items": {
+ "$ref": "BucketAccessControl"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For lists of bucket access control entries, this is always storage#bucketAccessControls.",
+ "default": "storage#bucketAccessControls"
+ }
+ }
+ },
+ "Buckets": {
+ "id": "Buckets",
+ "type": "object",
+ "description": "A list of buckets.",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "The list of items.",
+ "items": {
+ "$ref": "Bucket"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For lists of buckets, this is always storage#buckets.",
+ "default": "storage#buckets"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results."
+ }
+ }
+ },
+ "Channel": {
+ "id": "Channel",
+ "type": "object",
+ "description": "An notification channel used to watch for resource changes.",
+ "properties": {
+ "address": {
+ "type": "string",
+ "description": "The address where notifications are delivered for this channel."
+ },
+ "expiration": {
+ "type": "string",
+ "description": "Date and time of notification channel expiration, expressed as a Unix timestamp, in milliseconds. Optional.",
+ "format": "int64"
+ },
+ "id": {
+ "type": "string",
+ "description": "A UUID or similar unique string that identifies this channel."
+ },
+ "kind": {
+ "type": "string",
+ "description": "Identifies this as a notification channel used to watch for changes to a resource. Value: the fixed string \"api#channel\".",
+ "default": "api#channel"
+ },
+ "params": {
+ "type": "object",
+ "description": "Additional parameters controlling delivery channel behavior. Optional.",
+ "additionalProperties": {
+ "type": "string",
+ "description": "Declares a new parameter by name."
+ }
+ },
+ "payload": {
+ "type": "boolean",
+ "description": "A Boolean value to indicate whether payload is wanted. Optional."
+ },
+ "resourceId": {
+ "type": "string",
+ "description": "An opaque ID that identifies the resource being watched on this channel. Stable across different API versions."
+ },
+ "resourceUri": {
+ "type": "string",
+ "description": "A version-specific identifier for the watched resource."
+ },
+ "token": {
+ "type": "string",
+ "description": "An arbitrary string delivered to the target address with each notification delivered over this channel. Optional."
+ },
+ "type": {
+ "type": "string",
+ "description": "The type of delivery mechanism used for this channel."
+ }
+ }
+ },
+ "ComposeRequest": {
+ "id": "ComposeRequest",
+ "type": "object",
+ "description": "A Compose request.",
+ "properties": {
+ "destination": {
+ "$ref": "Object",
+ "description": "Properties of the resulting object."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is.",
+ "default": "storage#composeRequest"
+ },
+ "sourceObjects": {
+ "type": "array",
+ "description": "The list of source objects that will be concatenated into a single object.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "generation": {
+ "type": "string",
+ "description": "The generation of this object to use as the source.",
+ "format": "int64"
+ },
+ "name": {
+ "type": "string",
+ "description": "The source object's name. The source object's bucket is implicitly the destination bucket.",
+ "annotations": {
+ "required": [
+ "storage.objects.compose"
+ ]
+ }
+ },
+ "objectPreconditions": {
+ "type": "object",
+ "description": "Conditions that must be met for this operation to execute.",
+ "properties": {
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Only perform the composition if the generation of the source object that would be used matches this value. If this value and a generation are both specified, they must be the same value or the call will fail.",
+ "format": "int64"
+ }
+ }
+ }
+ }
+ },
+ "annotations": {
+ "required": [
+ "storage.objects.compose"
+ ]
+ }
+ }
+ }
+ },
+ "Object": {
+ "id": "Object",
+ "type": "object",
+ "description": "An object.",
+ "properties": {
+ "acl": {
+ "type": "array",
+ "description": "Access controls on the object.",
+ "items": {
+ "$ref": "ObjectAccessControl"
+ },
+ "annotations": {
+ "required": [
+ "storage.objects.update"
+ ]
+ }
+ },
+ "bucket": {
+ "type": "string",
+ "description": "The name of the bucket containing this object."
+ },
+ "cacheControl": {
+ "type": "string",
+ "description": "Cache-Control directive for the object data."
+ },
+ "componentCount": {
+ "type": "integer",
+ "description": "Number of underlying components that make up this object. Components are accumulated by compose operations.",
+ "format": "int32"
+ },
+ "contentDisposition": {
+ "type": "string",
+ "description": "Content-Disposition of the object data."
+ },
+ "contentEncoding": {
+ "type": "string",
+ "description": "Content-Encoding of the object data."
+ },
+ "contentLanguage": {
+ "type": "string",
+ "description": "Content-Language of the object data."
+ },
+ "contentType": {
+ "type": "string",
+ "description": "Content-Type of the object data. If contentType is not specified, object downloads will be served as application/octet-stream."
+ },
+ "crc32c": {
+ "type": "string",
+ "description": "CRC32c checksum, as described in RFC 4960, Appendix B; encoded using base64 in big-endian byte order. For more information about using the CRC32c checksum, see Hashes and ETags: Best Practices."
+ },
+ "customerEncryption": {
+ "type": "object",
+ "description": "Metadata of customer-supplied encryption key, if the object is encrypted by such a key.",
+ "properties": {
+ "encryptionAlgorithm": {
+ "type": "string",
+ "description": "The encryption algorithm."
+ },
+ "keySha256": {
+ "type": "string",
+ "description": "SHA256 hash value of the encryption key."
+ }
+ }
+ },
+ "etag": {
+ "type": "string",
+ "description": "HTTP 1.1 Entity tag for the object."
+ },
+ "generation": {
+ "type": "string",
+ "description": "The content generation of this object. Used for object versioning.",
+ "format": "int64"
+ },
+ "id": {
+ "type": "string",
+ "description": "The ID of the object."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For objects, this is always storage#object.",
+ "default": "storage#object"
+ },
+ "md5Hash": {
+ "type": "string",
+ "description": "MD5 hash of the data; encoded using base64. For more information about using the MD5 hash, see Hashes and ETags: Best Practices."
+ },
+ "mediaLink": {
+ "type": "string",
+ "description": "Media download link."
+ },
+ "metadata": {
+ "type": "object",
+ "description": "User-provided metadata, in key/value pairs.",
+ "additionalProperties": {
+ "type": "string",
+ "description": "An individual metadata entry."
+ }
+ },
+ "metageneration": {
+ "type": "string",
+ "description": "The version of the metadata for this object at this generation. Used for preconditions and for detecting changes in metadata. A metageneration number is only meaningful in the context of a particular generation of a particular object.",
+ "format": "int64"
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of this object. Required if not specified by URL parameter."
+ },
+ "owner": {
+ "type": "object",
+ "description": "The owner of the object. This will always be the uploader of the object.",
+ "properties": {
+ "entity": {
+ "type": "string",
+ "description": "The entity, in the form user-userId."
+ },
+ "entityId": {
+ "type": "string",
+ "description": "The ID for the entity."
+ }
+ }
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "The link to this object."
+ },
+ "size": {
+ "type": "string",
+ "description": "Content-Length of the data in bytes.",
+ "format": "uint64"
+ },
+ "storageClass": {
+ "type": "string",
+ "description": "Storage class of the object."
+ },
+ "timeCreated": {
+ "type": "string",
+ "description": "The creation time of the object in RFC 3339 format.",
+ "format": "date-time"
+ },
+ "timeDeleted": {
+ "type": "string",
+ "description": "The deletion time of the object in RFC 3339 format. Will be returned if and only if this version of the object has been deleted.",
+ "format": "date-time"
+ },
+ "updated": {
+ "type": "string",
+ "description": "The modification time of the object metadata in RFC 3339 format.",
+ "format": "date-time"
+ }
+ }
+ },
+ "ObjectAccessControl": {
+ "id": "ObjectAccessControl",
+ "type": "object",
+ "description": "An access-control entry.",
+ "properties": {
+ "bucket": {
+ "type": "string",
+ "description": "The name of the bucket."
+ },
+ "domain": {
+ "type": "string",
+ "description": "The domain associated with the entity, if any."
+ },
+ "email": {
+ "type": "string",
+ "description": "The email address associated with the entity, if any."
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission, in one of the following forms: \n- user-userId \n- user-email \n- group-groupId \n- group-email \n- domain-domain \n- project-team-projectId \n- allUsers \n- allAuthenticatedUsers Examples: \n- The user liz@example.com would be user-liz@example.com. \n- The group example@googlegroups.com would be group-example@googlegroups.com. \n- To refer to all members of the Google Apps for Business domain example.com, the entity would be domain-example.com."
+ },
+ "entityId": {
+ "type": "string",
+ "description": "The ID for the entity, if any."
+ },
+ "etag": {
+ "type": "string",
+ "description": "HTTP 1.1 Entity tag for the access-control entry."
+ },
+ "generation": {
+ "type": "string",
+ "description": "The content generation of the object.",
+ "format": "int64"
+ },
+ "id": {
+ "type": "string",
+ "description": "The ID of the access-control entry."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For object access control entries, this is always storage#objectAccessControl.",
+ "default": "storage#objectAccessControl"
+ },
+ "object": {
+ "type": "string",
+ "description": "The name of the object."
+ },
+ "projectTeam": {
+ "type": "object",
+ "description": "The project team associated with the entity, if any.",
+ "properties": {
+ "projectNumber": {
+ "type": "string",
+ "description": "The project number."
+ },
+ "team": {
+ "type": "string",
+ "description": "The team. Can be owners, editors, or viewers."
+ }
+ }
+ },
+ "role": {
+ "type": "string",
+ "description": "The access permission for the entity. Can be READER or OWNER."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "The link to this access-control entry."
+ }
+ }
+ },
+ "ObjectAccessControls": {
+ "id": "ObjectAccessControls",
+ "type": "object",
+ "description": "An access-control list.",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "The list of items.",
+ "items": {
+ "type": "any"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For lists of object access control entries, this is always storage#objectAccessControls.",
+ "default": "storage#objectAccessControls"
+ }
+ }
+ },
+ "Objects": {
+ "id": "Objects",
+ "type": "object",
+ "description": "A list of objects.",
+ "properties": {
+ "items": {
+ "type": "array",
+ "description": "The list of items.",
+ "items": {
+ "$ref": "Object"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is. For lists of objects, this is always storage#objects.",
+ "default": "storage#objects"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "The continuation token, used to page through large result sets. Provide this value in a subsequent request to return the next page of results."
+ },
+ "prefixes": {
+ "type": "array",
+ "description": "The list of prefixes of objects matching-but-not-listed up to and including the requested delimiter.",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "RewriteResponse": {
+ "id": "RewriteResponse",
+ "type": "object",
+ "description": "A rewrite response.",
+ "properties": {
+ "done": {
+ "type": "boolean",
+ "description": "true if the copy is finished; otherwise, false if the copy is in progress. This property is always present in the response."
+ },
+ "kind": {
+ "type": "string",
+ "description": "The kind of item this is.",
+ "default": "storage#rewriteResponse"
+ },
+ "objectSize": {
+ "type": "string",
+ "description": "The total size of the object being copied in bytes. This property is always present in the response.",
+ "format": "uint64"
+ },
+ "resource": {
+ "$ref": "Object",
+ "description": "A resource containing the metadata for the copied-to object. This property is present in the response only when copying completes."
+ },
+ "rewriteToken": {
+ "type": "string",
+ "description": "A token to use in subsequent requests to continue copying data. This token is present in the response only when there is more data to copy."
+ },
+ "totalBytesRewritten": {
+ "type": "string",
+ "description": "The total bytes written so far, which can be used to provide a waiting user with a progress indicator. This property is always present in the response.",
+ "format": "uint64"
+ }
+ }
+ }
+ },
+ "resources": {
+ "bucketAccessControls": {
+ "methods": {
+ "delete": {
+ "id": "storage.bucketAccessControls.delete",
+ "path": "b/{bucket}/acl/{entity}",
+ "httpMethod": "DELETE",
+ "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "get": {
+ "id": "storage.bucketAccessControls.get",
+ "path": "b/{bucket}/acl/{entity}",
+ "httpMethod": "GET",
+ "description": "Returns the ACL entry for the specified entity on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "response": {
+ "$ref": "BucketAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "insert": {
+ "id": "storage.bucketAccessControls.insert",
+ "path": "b/{bucket}/acl",
+ "httpMethod": "POST",
+ "description": "Creates a new ACL entry on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "request": {
+ "$ref": "BucketAccessControl"
+ },
+ "response": {
+ "$ref": "BucketAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "list": {
+ "id": "storage.bucketAccessControls.list",
+ "path": "b/{bucket}/acl",
+ "httpMethod": "GET",
+ "description": "Retrieves ACL entries on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "response": {
+ "$ref": "BucketAccessControls"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "patch": {
+ "id": "storage.bucketAccessControls.patch",
+ "path": "b/{bucket}/acl/{entity}",
+ "httpMethod": "PATCH",
+ "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "request": {
+ "$ref": "BucketAccessControl"
+ },
+ "response": {
+ "$ref": "BucketAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "update": {
+ "id": "storage.bucketAccessControls.update",
+ "path": "b/{bucket}/acl/{entity}",
+ "httpMethod": "PUT",
+ "description": "Updates an ACL entry on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "request": {
+ "$ref": "BucketAccessControl"
+ },
+ "response": {
+ "$ref": "BucketAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ }
+ }
+ },
+ "buckets": {
+ "methods": {
+ "delete": {
+ "id": "storage.buckets.delete",
+ "path": "b/{bucket}",
+ "httpMethod": "DELETE",
+ "description": "Permanently deletes an empty bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "If set, only deletes the bucket if its metageneration matches this value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "If set, only deletes the bucket if its metageneration does not match this value.",
+ "format": "int64",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "get": {
+ "id": "storage.buckets.get",
+ "path": "b/{bucket}",
+ "httpMethod": "GET",
+ "description": "Returns metadata for the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit owner, acl and defaultObjectAcl properties."
+ ],
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "response": {
+ "$ref": "Bucket"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "insert": {
+ "id": "storage.buckets.insert",
+ "path": "b",
+ "httpMethod": "POST",
+ "description": "Creates a new bucket.",
+ "parameters": {
+ "predefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to this bucket.",
+ "enum": [
+ "authenticatedRead",
+ "private",
+ "projectPrivate",
+ "publicRead",
+ "publicReadWrite"
+ ],
+ "enumDescriptions": [
+ "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
+ "Project team owners get OWNER access.",
+ "Project team members get access according to their roles.",
+ "Project team owners get OWNER access, and allUsers get READER access.",
+ "Project team owners get OWNER access, and allUsers get WRITER access."
+ ],
+ "location": "query"
+ },
+ "predefinedDefaultObjectAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of default object access controls to this bucket.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "A valid API project identifier.",
+ "required": true,
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit owner, acl and defaultObjectAcl properties."
+ ],
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "Bucket"
+ },
+ "response": {
+ "$ref": "Bucket"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "list": {
+ "id": "storage.buckets.list",
+ "path": "b",
+ "httpMethod": "GET",
+ "description": "Retrieves a list of buckets for a given project.",
+ "parameters": {
+ "maxResults": {
+ "type": "integer",
+ "description": "Maximum number of buckets to return.",
+ "format": "uint32",
+ "minimum": "0",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "A previously-returned page token representing part of the larger set of results to view.",
+ "location": "query"
+ },
+ "prefix": {
+ "type": "string",
+ "description": "Filter results to buckets whose names begin with this prefix.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "A valid API project identifier.",
+ "required": true,
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit owner, acl and defaultObjectAcl properties."
+ ],
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "Buckets"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "patch": {
+ "id": "storage.buckets.patch",
+ "path": "b/{bucket}",
+ "httpMethod": "PATCH",
+ "description": "Updates a bucket. This method supports patch semantics.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "predefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to this bucket.",
+ "enum": [
+ "authenticatedRead",
+ "private",
+ "projectPrivate",
+ "publicRead",
+ "publicReadWrite"
+ ],
+ "enumDescriptions": [
+ "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
+ "Project team owners get OWNER access.",
+ "Project team members get access according to their roles.",
+ "Project team owners get OWNER access, and allUsers get READER access.",
+ "Project team owners get OWNER access, and allUsers get WRITER access."
+ ],
+ "location": "query"
+ },
+ "predefinedDefaultObjectAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of default object access controls to this bucket.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit owner, acl and defaultObjectAcl properties."
+ ],
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "request": {
+ "$ref": "Bucket"
+ },
+ "response": {
+ "$ref": "Bucket"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "update": {
+ "id": "storage.buckets.update",
+ "path": "b/{bucket}",
+ "httpMethod": "PUT",
+ "description": "Updates a bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "predefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to this bucket.",
+ "enum": [
+ "authenticatedRead",
+ "private",
+ "projectPrivate",
+ "publicRead",
+ "publicReadWrite"
+ ],
+ "enumDescriptions": [
+ "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
+ "Project team owners get OWNER access.",
+ "Project team members get access according to their roles.",
+ "Project team owners get OWNER access, and allUsers get READER access.",
+ "Project team owners get OWNER access, and allUsers get WRITER access."
+ ],
+ "location": "query"
+ },
+ "predefinedDefaultObjectAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of default object access controls to this bucket.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit owner, acl and defaultObjectAcl properties."
+ ],
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "request": {
+ "$ref": "Bucket"
+ },
+ "response": {
+ "$ref": "Bucket"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ }
+ }
+ },
+ "channels": {
+ "methods": {
+ "stop": {
+ "id": "storage.channels.stop",
+ "path": "channels/stop",
+ "httpMethod": "POST",
+ "description": "Stop watching resources through this channel",
+ "request": {
+ "$ref": "Channel",
+ "parameterName": "resource"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ }
+ }
+ },
+ "defaultObjectAccessControls": {
+ "methods": {
+ "delete": {
+ "id": "storage.defaultObjectAccessControls.delete",
+ "path": "b/{bucket}/defaultObjectAcl/{entity}",
+ "httpMethod": "DELETE",
+ "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "get": {
+ "id": "storage.defaultObjectAccessControls.get",
+ "path": "b/{bucket}/defaultObjectAcl/{entity}",
+ "httpMethod": "GET",
+ "description": "Returns the default object ACL entry for the specified entity on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "insert": {
+ "id": "storage.defaultObjectAccessControls.insert",
+ "path": "b/{bucket}/defaultObjectAcl",
+ "httpMethod": "POST",
+ "description": "Creates a new default object ACL entry on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "request": {
+ "$ref": "ObjectAccessControl"
+ },
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "list": {
+ "id": "storage.defaultObjectAccessControls.list",
+ "path": "b/{bucket}/defaultObjectAcl",
+ "httpMethod": "GET",
+ "description": "Retrieves default object ACL entries on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "response": {
+ "$ref": "ObjectAccessControls"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "patch": {
+ "id": "storage.defaultObjectAccessControls.patch",
+ "path": "b/{bucket}/defaultObjectAcl/{entity}",
+ "httpMethod": "PATCH",
+ "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "request": {
+ "$ref": "ObjectAccessControl"
+ },
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "update": {
+ "id": "storage.defaultObjectAccessControls.update",
+ "path": "b/{bucket}/defaultObjectAcl/{entity}",
+ "httpMethod": "PUT",
+ "description": "Updates a default object ACL entry on the specified bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "entity"
+ ],
+ "request": {
+ "$ref": "ObjectAccessControl"
+ },
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ }
+ }
+ },
+ "objectAccessControls": {
+ "methods": {
+ "delete": {
+ "id": "storage.objectAccessControls.delete",
+ "path": "b/{bucket}/o/{object}/acl/{entity}",
+ "httpMethod": "DELETE",
+ "description": "Permanently deletes the ACL entry for the specified entity on the specified object.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object",
+ "entity"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "get": {
+ "id": "storage.objectAccessControls.get",
+ "path": "b/{bucket}/o/{object}/acl/{entity}",
+ "httpMethod": "GET",
+ "description": "Returns the ACL entry for the specified entity on the specified object.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object",
+ "entity"
+ ],
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "insert": {
+ "id": "storage.objectAccessControls.insert",
+ "path": "b/{bucket}/o/{object}/acl",
+ "httpMethod": "POST",
+ "description": "Creates a new ACL entry on the specified object.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object"
+ ],
+ "request": {
+ "$ref": "ObjectAccessControl"
+ },
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "list": {
+ "id": "storage.objectAccessControls.list",
+ "path": "b/{bucket}/o/{object}/acl",
+ "httpMethod": "GET",
+ "description": "Retrieves ACL entries on the specified object.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object"
+ ],
+ "response": {
+ "$ref": "ObjectAccessControls"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "patch": {
+ "id": "storage.objectAccessControls.patch",
+ "path": "b/{bucket}/o/{object}/acl/{entity}",
+ "httpMethod": "PATCH",
+ "description": "Updates an ACL entry on the specified object. This method supports patch semantics.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object",
+ "entity"
+ ],
+ "request": {
+ "$ref": "ObjectAccessControl"
+ },
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "update": {
+ "id": "storage.objectAccessControls.update",
+ "path": "b/{bucket}/o/{object}/acl/{entity}",
+ "httpMethod": "PUT",
+ "description": "Updates an ACL entry on the specified object.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of a bucket.",
+ "required": true,
+ "location": "path"
+ },
+ "entity": {
+ "type": "string",
+ "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object",
+ "entity"
+ ],
+ "request": {
+ "$ref": "ObjectAccessControl"
+ },
+ "response": {
+ "$ref": "ObjectAccessControl"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ }
+ }
+ },
+ "objects": {
+ "methods": {
+ "compose": {
+ "id": "storage.objects.compose",
+ "path": "b/{destinationBucket}/o/{destinationObject}/compose",
+ "httpMethod": "POST",
+ "description": "Concatenates a list of existing objects into a new object in the same bucket.",
+ "parameters": {
+ "destinationBucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to store the new object.",
+ "required": true,
+ "location": "path"
+ },
+ "destinationObject": {
+ "type": "string",
+ "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "destinationPredefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to the destination object.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "destinationBucket",
+ "destinationObject"
+ ],
+ "request": {
+ "$ref": "ComposeRequest"
+ },
+ "response": {
+ "$ref": "Object"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ],
+ "supportsMediaDownload": true,
+ "useMediaDownloadService": true
+ },
+ "copy": {
+ "id": "storage.objects.copy",
+ "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}",
+ "httpMethod": "POST",
+ "description": "Copies a source object to a destination object. Optionally overrides metadata.",
+ "parameters": {
+ "destinationBucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "destinationObject": {
+ "type": "string",
+ "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
+ "required": true,
+ "location": "path"
+ },
+ "destinationPredefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to the destination object.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's generation matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's generation does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ },
+ "sourceBucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to find the source object.",
+ "required": true,
+ "location": "path"
+ },
+ "sourceGeneration": {
+ "type": "string",
+ "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "sourceObject": {
+ "type": "string",
+ "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "sourceBucket",
+ "sourceObject",
+ "destinationBucket",
+ "destinationObject"
+ ],
+ "request": {
+ "$ref": "Object"
+ },
+ "response": {
+ "$ref": "Object"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ],
+ "supportsMediaDownload": true,
+ "useMediaDownloadService": true
+ },
+ "delete": {
+ "id": "storage.objects.delete",
+ "path": "b/{bucket}/o/{object}",
+ "httpMethod": "DELETE",
+ "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which the object resides.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "get": {
+ "id": "storage.objects.get",
+ "path": "b/{bucket}/o/{object}",
+ "httpMethod": "GET",
+ "description": "Retrieves an object or its metadata.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which the object resides.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's generation matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's generation does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object"
+ ],
+ "response": {
+ "$ref": "Object"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ],
+ "supportsMediaDownload": true,
+ "useMediaDownloadService": true
+ },
+ "insert": {
+ "id": "storage.objects.insert",
+ "path": "b/{bucket}/o",
+ "httpMethod": "POST",
+ "description": "Stores a new object and metadata.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
+ "required": true,
+ "location": "path"
+ },
+ "contentEncoding": {
+ "type": "string",
+ "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.",
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "location": "query"
+ },
+ "predefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to this object.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "request": {
+ "$ref": "Object"
+ },
+ "response": {
+ "$ref": "Object"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ],
+ "supportsMediaDownload": true,
+ "useMediaDownloadService": true,
+ "supportsMediaUpload": true,
+ "mediaUpload": {
+ "accept": [
+ "*/*"
+ ],
+ "protocols": {
+ "simple": {
+ "multipart": true,
+ "path": "/upload/storage/v1/b/{bucket}/o"
+ },
+ "resumable": {
+ "multipart": true,
+ "path": "/resumable/upload/storage/v1/b/{bucket}/o"
+ }
+ }
+ }
+ },
+ "list": {
+ "id": "storage.objects.list",
+ "path": "b/{bucket}/o",
+ "httpMethod": "GET",
+ "description": "Retrieves a list of objects matching the criteria.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to look for objects.",
+ "required": true,
+ "location": "path"
+ },
+ "delimiter": {
+ "type": "string",
+ "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.",
+ "format": "uint32",
+ "minimum": "0",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "A previously-returned page token representing part of the larger set of results to view.",
+ "location": "query"
+ },
+ "prefix": {
+ "type": "string",
+ "description": "Filter results to objects whose names begin with this prefix.",
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ },
+ "versions": {
+ "type": "boolean",
+ "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "response": {
+ "$ref": "Objects"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ],
+ "supportsSubscription": true
+ },
+ "patch": {
+ "id": "storage.objects.patch",
+ "path": "b/{bucket}/o/{object}",
+ "httpMethod": "PATCH",
+ "description": "Updates an object's metadata. This method supports patch semantics.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which the object resides.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "predefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to this object.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object"
+ ],
+ "request": {
+ "$ref": "Object"
+ },
+ "response": {
+ "$ref": "Object"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ]
+ },
+ "rewrite": {
+ "id": "storage.objects.rewrite",
+ "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}",
+ "httpMethod": "POST",
+ "description": "Rewrites a source object to a destination object. Optionally overrides metadata.",
+ "parameters": {
+ "destinationBucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
+ "required": true,
+ "location": "path"
+ },
+ "destinationObject": {
+ "type": "string",
+ "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "destinationPredefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to the destination object.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's generation matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's generation does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifSourceMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "maxBytesRewrittenPerCall": {
+ "type": "string",
+ "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.",
+ "format": "int64",
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ },
+ "rewriteToken": {
+ "type": "string",
+ "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.",
+ "location": "query"
+ },
+ "sourceBucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to find the source object.",
+ "required": true,
+ "location": "path"
+ },
+ "sourceGeneration": {
+ "type": "string",
+ "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "sourceObject": {
+ "type": "string",
+ "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "sourceBucket",
+ "sourceObject",
+ "destinationBucket",
+ "destinationObject"
+ ],
+ "request": {
+ "$ref": "Object"
+ },
+ "response": {
+ "$ref": "RewriteResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "update": {
+ "id": "storage.objects.update",
+ "path": "b/{bucket}/o/{object}",
+ "httpMethod": "PUT",
+ "description": "Updates an object's metadata.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which the object resides.",
+ "required": true,
+ "location": "path"
+ },
+ "generation": {
+ "type": "string",
+ "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifGenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "ifMetagenerationNotMatch": {
+ "type": "string",
+ "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ "format": "int64",
+ "location": "query"
+ },
+ "object": {
+ "type": "string",
+ "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ "required": true,
+ "location": "path"
+ },
+ "predefinedAcl": {
+ "type": "string",
+ "description": "Apply a predefined set of access controls to this object.",
+ "enum": [
+ "authenticatedRead",
+ "bucketOwnerFullControl",
+ "bucketOwnerRead",
+ "private",
+ "projectPrivate",
+ "publicRead"
+ ],
+ "enumDescriptions": [
+ "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ "Object owner gets OWNER access, and project team owners get OWNER access.",
+ "Object owner gets OWNER access, and project team owners get READER access.",
+ "Object owner gets OWNER access.",
+ "Object owner gets OWNER access, and project team members get access according to their roles.",
+ "Object owner gets OWNER access, and allUsers get READER access."
+ ],
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to full.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket",
+ "object"
+ ],
+ "request": {
+ "$ref": "Object"
+ },
+ "response": {
+ "$ref": "Object"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/devstorage.full_control"
+ ],
+ "supportsMediaDownload": true,
+ "useMediaDownloadService": true
+ },
+ "watchAll": {
+ "id": "storage.objects.watchAll",
+ "path": "b/{bucket}/o/watch",
+ "httpMethod": "POST",
+ "description": "Watch for changes on all objects in a bucket.",
+ "parameters": {
+ "bucket": {
+ "type": "string",
+ "description": "Name of the bucket in which to look for objects.",
+ "required": true,
+ "location": "path"
+ },
+ "delimiter": {
+ "type": "string",
+ "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.",
+ "format": "uint32",
+ "minimum": "0",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "A previously-returned page token representing part of the larger set of results to view.",
+ "location": "query"
+ },
+ "prefix": {
+ "type": "string",
+ "description": "Filter results to objects whose names begin with this prefix.",
+ "location": "query"
+ },
+ "projection": {
+ "type": "string",
+ "description": "Set of properties to return. Defaults to noAcl.",
+ "enum": [
+ "full",
+ "noAcl"
+ ],
+ "enumDescriptions": [
+ "Include all properties.",
+ "Omit the owner, acl property."
+ ],
+ "location": "query"
+ },
+ "versions": {
+ "type": "boolean",
+ "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
+ "location": "query"
+ }
+ },
+ "parameterOrder": [
+ "bucket"
+ ],
+ "request": {
+ "$ref": "Channel",
+ "parameterName": "resource"
+ },
+ "response": {
+ "$ref": "Channel"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform",
+ "https://www.googleapis.com/auth/cloud-platform.read-only",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ],
+ "supportsSubscription": true
+ }
+ }
+ }
+ }
+}
diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go
new file mode 100644
index 0000000..323c9ca
--- /dev/null
+++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go
@@ -0,0 +1,7740 @@
+// Package storage provides access to the Cloud Storage JSON API.
+//
+// See https://developers.google.com/storage/docs/json_api/
+//
+// Usage example:
+//
+// import "google.golang.org/api/storage/v1"
+// ...
+// storageService, err := storage.New(oauthHttpClient)
+package storage // import "google.golang.org/api/storage/v1"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ context "golang.org/x/net/context"
+ ctxhttp "golang.org/x/net/context/ctxhttp"
+ gensupport "google.golang.org/api/gensupport"
+ googleapi "google.golang.org/api/googleapi"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+// Always reference these packages, just in case the auto-generated code
+// below doesn't.
+var _ = bytes.NewBuffer
+var _ = strconv.Itoa
+var _ = fmt.Sprintf
+var _ = json.NewDecoder
+var _ = io.Copy
+var _ = url.Parse
+var _ = gensupport.MarshalJSON
+var _ = googleapi.Version
+var _ = errors.New
+var _ = strings.Replace
+var _ = context.Canceled
+var _ = ctxhttp.Do
+
+const apiId = "storage:v1"
+const apiName = "storage"
+const apiVersion = "v1"
+const basePath = "https://www.googleapis.com/storage/v1/"
+
+// OAuth2 scopes used by this API.
+const (
+ // View and manage your data across Google Cloud Platform services
+ CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
+
+ // View your data across Google Cloud Platform services
+ CloudPlatformReadOnlyScope = "https://www.googleapis.com/auth/cloud-platform.read-only"
+
+ // Manage your data and permissions in Google Cloud Storage
+ DevstorageFullControlScope = "https://www.googleapis.com/auth/devstorage.full_control"
+
+ // View your data in Google Cloud Storage
+ DevstorageReadOnlyScope = "https://www.googleapis.com/auth/devstorage.read_only"
+
+ // Manage your data in Google Cloud Storage
+ DevstorageReadWriteScope = "https://www.googleapis.com/auth/devstorage.read_write"
+)
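+
+// A minimal sketch of obtaining an authorized client for one of the
+// scopes above. It assumes Application Default Credentials are available
+// and uses golang.org/x/oauth2/google; the variable names are
+// illustrative:
+//
+//   ctx := context.Background()
+//   httpClient, err := google.DefaultClient(ctx, storage.DevstorageReadWriteScope)
+//   if err != nil {
+//       log.Fatal(err)
+//   }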
+
+func New(client *http.Client) (*Service, error) {
+ if client == nil {
+ return nil, errors.New("client is nil")
+ }
+ s := &Service{client: client, BasePath: basePath}
+ s.BucketAccessControls = NewBucketAccessControlsService(s)
+ s.Buckets = NewBucketsService(s)
+ s.Channels = NewChannelsService(s)
+ s.DefaultObjectAccessControls = NewDefaultObjectAccessControlsService(s)
+ s.ObjectAccessControls = NewObjectAccessControlsService(s)
+ s.Objects = NewObjectsService(s)
+ return s, nil
+}
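+
+// A construction sketch, assuming httpClient is an OAuth2-authorized
+// *http.Client such as the one from the scope example above:
+//
+//   svc, err := storage.New(httpClient)
+//   if err != nil {
+//       log.Fatal(err)
+//   }
+//   svc.UserAgent = "example-app/0.1" // optional extra User-Agent fragment
+//   _ = svc.Objects                   // per-resource services hang off *Service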
+
+type Service struct {
+ client *http.Client
+ BasePath string // API endpoint base URL
+ UserAgent string // optional additional User-Agent fragment
+
+ BucketAccessControls *BucketAccessControlsService
+
+ Buckets *BucketsService
+
+ Channels *ChannelsService
+
+ DefaultObjectAccessControls *DefaultObjectAccessControlsService
+
+ ObjectAccessControls *ObjectAccessControlsService
+
+ Objects *ObjectsService
+}
+
+func (s *Service) userAgent() string {
+ if s.UserAgent == "" {
+ return googleapi.UserAgent
+ }
+ return googleapi.UserAgent + " " + s.UserAgent
+}
+
+func NewBucketAccessControlsService(s *Service) *BucketAccessControlsService {
+ rs := &BucketAccessControlsService{s: s}
+ return rs
+}
+
+type BucketAccessControlsService struct {
+ s *Service
+}
+
+func NewBucketsService(s *Service) *BucketsService {
+ rs := &BucketsService{s: s}
+ return rs
+}
+
+type BucketsService struct {
+ s *Service
+}
+
+func NewChannelsService(s *Service) *ChannelsService {
+ rs := &ChannelsService{s: s}
+ return rs
+}
+
+type ChannelsService struct {
+ s *Service
+}
+
+func NewDefaultObjectAccessControlsService(s *Service) *DefaultObjectAccessControlsService {
+ rs := &DefaultObjectAccessControlsService{s: s}
+ return rs
+}
+
+type DefaultObjectAccessControlsService struct {
+ s *Service
+}
+
+func NewObjectAccessControlsService(s *Service) *ObjectAccessControlsService {
+ rs := &ObjectAccessControlsService{s: s}
+ return rs
+}
+
+type ObjectAccessControlsService struct {
+ s *Service
+}
+
+func NewObjectsService(s *Service) *ObjectsService {
+ rs := &ObjectsService{s: s}
+ return rs
+}
+
+type ObjectsService struct {
+ s *Service
+}
+
+// Bucket: A bucket.
+type Bucket struct {
+ // Acl: Access controls on the bucket.
+ Acl []*BucketAccessControl `json:"acl,omitempty"`
+
+ // Cors: The bucket's Cross-Origin Resource Sharing (CORS)
+ // configuration.
+ Cors []*BucketCors `json:"cors,omitempty"`
+
+ // DefaultObjectAcl: Default access controls to apply to new objects
+ // when no ACL is provided.
+ DefaultObjectAcl []*ObjectAccessControl `json:"defaultObjectAcl,omitempty"`
+
+ // Etag: HTTP 1.1 Entity tag for the bucket.
+ Etag string `json:"etag,omitempty"`
+
+ // Id: The ID of the bucket.
+ Id string `json:"id,omitempty"`
+
+ // Kind: The kind of item this is. For buckets, this is always
+ // storage#bucket.
+ Kind string `json:"kind,omitempty"`
+
+ // Lifecycle: The bucket's lifecycle configuration. See lifecycle
+ // management for more information.
+ Lifecycle *BucketLifecycle `json:"lifecycle,omitempty"`
+
+ // Location: The location of the bucket. Object data for objects in the
+ // bucket resides in physical storage within this region. Defaults to
+ // US. See the developer's guide for the authoritative list.
+ Location string `json:"location,omitempty"`
+
+ // Logging: The bucket's logging configuration, which defines the
+ // destination bucket and optional name prefix for the current bucket's
+ // logs.
+ Logging *BucketLogging `json:"logging,omitempty"`
+
+ // Metageneration: The metadata generation of this bucket.
+ Metageneration int64 `json:"metageneration,omitempty,string"`
+
+ // Name: The name of the bucket.
+ Name string `json:"name,omitempty"`
+
+ // Owner: The owner of the bucket. This is always the project team's
+ // owner group.
+ Owner *BucketOwner `json:"owner,omitempty"`
+
+ // ProjectNumber: The project number of the project the bucket belongs
+ // to.
+ ProjectNumber uint64 `json:"projectNumber,omitempty,string"`
+
+ // SelfLink: The URI of this bucket.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // StorageClass: The bucket's storage class. This defines how objects in
+ // the bucket are stored and determines the SLA and the cost of storage.
+ // Values include STANDARD, NEARLINE and DURABLE_REDUCED_AVAILABILITY.
+ // Defaults to STANDARD. For more information, see storage classes.
+ StorageClass string `json:"storageClass,omitempty"`
+
+ // TimeCreated: The creation time of the bucket in RFC 3339 format.
+ TimeCreated string `json:"timeCreated,omitempty"`
+
+ // Updated: The modification time of the bucket in RFC 3339 format.
+ Updated string `json:"updated,omitempty"`
+
+ // Versioning: The bucket's versioning configuration.
+ Versioning *BucketVersioning `json:"versioning,omitempty"`
+
+ // Website: The bucket's website configuration.
+ Website *BucketWebsite `json:"website,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Acl") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Bucket) MarshalJSON() ([]byte, error) {
+ type noMethod Bucket
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
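+
+// A sketch of how ForceSendFields interacts with MarshalJSON: empty
+// fields are normally dropped, but naming a Go field here forces its
+// empty value onto the wire, which is how a Patch request can clear a
+// setting. The field choice is illustrative:
+//
+//   b := &storage.Bucket{ForceSendFields: []string{"Website"}}
+//   data, err := b.MarshalJSON() // includes an empty "website" value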
+
+type BucketCors struct {
+ // MaxAgeSeconds: The value, in seconds, to return in the
+ // Access-Control-Max-Age header used in preflight responses.
+ MaxAgeSeconds int64 `json:"maxAgeSeconds,omitempty"`
+
+ // Method: The list of HTTP methods on which to include CORS response
+	// headers (GET, OPTIONS, POST, etc.). Note: "*" is permitted in the list
+ // of methods, and means "any method".
+ Method []string `json:"method,omitempty"`
+
+ // Origin: The list of Origins eligible to receive CORS response
+ // headers. Note: "*" is permitted in the list of origins, and means
+ // "any Origin".
+ Origin []string `json:"origin,omitempty"`
+
+ // ResponseHeader: The list of HTTP headers other than the simple
+ // response headers to give permission for the user-agent to share
+ // across domains.
+ ResponseHeader []string `json:"responseHeader,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "MaxAgeSeconds") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BucketCors) MarshalJSON() ([]byte, error) {
+ type noMethod BucketCors
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// BucketLifecycle: The bucket's lifecycle configuration. See lifecycle
+// management for more information.
+type BucketLifecycle struct {
+ // Rule: A lifecycle management rule, which is made of an action to take
+ // and the condition(s) under which the action will be taken.
+ Rule []*BucketLifecycleRule `json:"rule,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Rule") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BucketLifecycle) MarshalJSON() ([]byte, error) {
+ type noMethod BucketLifecycle
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type BucketLifecycleRule struct {
+ // Action: The action to take.
+ Action *BucketLifecycleRuleAction `json:"action,omitempty"`
+
+ // Condition: The condition(s) under which the action will be taken.
+ Condition *BucketLifecycleRuleCondition `json:"condition,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Action") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BucketLifecycleRule) MarshalJSON() ([]byte, error) {
+ type noMethod BucketLifecycleRule
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// BucketLifecycleRuleAction: The action to take.
+type BucketLifecycleRuleAction struct {
+ // Type: Type of the action. Currently, only Delete is supported.
+ Type string `json:"type,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Type") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BucketLifecycleRuleAction) MarshalJSON() ([]byte, error) {
+ type noMethod BucketLifecycleRuleAction
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// BucketLifecycleRuleCondition: The condition(s) under which the action
+// will be taken.
+type BucketLifecycleRuleCondition struct {
+ // Age: Age of an object (in days). This condition is satisfied when an
+ // object reaches the specified age.
+ Age int64 `json:"age,omitempty"`
+
+ // CreatedBefore: A date in RFC 3339 format with only the date part (for
+ // instance, "2013-01-15"). This condition is satisfied when an object
+ // is created before midnight of the specified date in UTC.
+ CreatedBefore string `json:"createdBefore,omitempty"`
+
+ // IsLive: Relevant only for versioned objects. If the value is true,
+ // this condition matches live objects; if the value is false, it
+ // matches archived objects.
+ IsLive bool `json:"isLive,omitempty"`
+
+ // NumNewerVersions: Relevant only for versioned objects. If the value
+ // is N, this condition is satisfied when there are at least N versions
+ // (including the live version) newer than this version of the object.
+ NumNewerVersions int64 `json:"numNewerVersions,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Age") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BucketLifecycleRuleCondition) MarshalJSON() ([]byte, error) {
+ type noMethod BucketLifecycleRuleCondition
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
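+
+// A sketch of a complete lifecycle rule assembled from the types above:
+// delete objects once they are 30 days old (the values are illustrative):
+//
+//   lifecycle := &storage.BucketLifecycle{
+//       Rule: []*storage.BucketLifecycleRule{{
+//           Action:    &storage.BucketLifecycleRuleAction{Type: "Delete"},
+//           Condition: &storage.BucketLifecycleRuleCondition{Age: 30},
+//       }},
+//   }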
+
+// BucketLogging: The bucket's logging configuration, which defines the
+// destination bucket and optional name prefix for the current bucket's
+// logs.
+type BucketLogging struct {
+ // LogBucket: The destination bucket where the current bucket's logs
+ // should be placed.
+ LogBucket string `json:"logBucket,omitempty"`
+
+ // LogObjectPrefix: A prefix for log object names.
+ LogObjectPrefix string `json:"logObjectPrefix,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "LogBucket") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BucketLogging) MarshalJSON() ([]byte, error) {
+ type noMethod BucketLogging
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// BucketOwner: The owner of the bucket. This is always the project
+// team's owner group.
+type BucketOwner struct {
+ // Entity: The entity, in the form project-owner-projectId.
+ Entity string `json:"entity,omitempty"`
+
+ // EntityId: The ID for the entity.
+ EntityId string `json:"entityId,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Entity") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BucketOwner) MarshalJSON() ([]byte, error) {
+ type noMethod BucketOwner
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// BucketVersioning: The bucket's versioning configuration.
+type BucketVersioning struct {
+ // Enabled: While set to true, versioning is fully enabled for this
+ // bucket.
+ Enabled bool `json:"enabled,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Enabled") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BucketVersioning) MarshalJSON() ([]byte, error) {
+ type noMethod BucketVersioning
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// BucketWebsite: The bucket's website configuration.
+type BucketWebsite struct {
+ // MainPageSuffix: Behaves as the bucket's directory index where missing
+ // objects are treated as potential directories.
+ MainPageSuffix string `json:"mainPageSuffix,omitempty"`
+
+ // NotFoundPage: The custom object to return when a requested resource
+ // is not found.
+ NotFoundPage string `json:"notFoundPage,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "MainPageSuffix") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BucketWebsite) MarshalJSON() ([]byte, error) {
+ type noMethod BucketWebsite
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
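+
+// A sketch combining the configuration types above into a single Bucket
+// value (bucket and object names are illustrative):
+//
+//   b := &storage.Bucket{
+//       Name:       "example-bucket",
+//       Versioning: &storage.BucketVersioning{Enabled: true},
+//       Logging:    &storage.BucketLogging{LogBucket: "example-logs", LogObjectPrefix: "access/"},
+//       Website:    &storage.BucketWebsite{MainPageSuffix: "index.html", NotFoundPage: "404.html"},
+//   }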
+
+// BucketAccessControl: An access-control entry.
+type BucketAccessControl struct {
+ // Bucket: The name of the bucket.
+ Bucket string `json:"bucket,omitempty"`
+
+ // Domain: The domain associated with the entity, if any.
+ Domain string `json:"domain,omitempty"`
+
+ // Email: The email address associated with the entity, if any.
+ Email string `json:"email,omitempty"`
+
+ // Entity: The entity holding the permission, in one of the following
+ // forms:
+ // - user-userId
+ // - user-email
+ // - group-groupId
+ // - group-email
+ // - domain-domain
+ // - project-team-projectId
+ // - allUsers
+	// - allAuthenticatedUsers
+	// Examples:
+ // - The user liz@example.com would be user-liz@example.com.
+ // - The group example@googlegroups.com would be
+ // group-example@googlegroups.com.
+ // - To refer to all members of the Google Apps for Business domain
+ // example.com, the entity would be domain-example.com.
+ Entity string `json:"entity,omitempty"`
+
+ // EntityId: The ID for the entity, if any.
+ EntityId string `json:"entityId,omitempty"`
+
+ // Etag: HTTP 1.1 Entity tag for the access-control entry.
+ Etag string `json:"etag,omitempty"`
+
+ // Id: The ID of the access-control entry.
+ Id string `json:"id,omitempty"`
+
+ // Kind: The kind of item this is. For bucket access control entries,
+ // this is always storage#bucketAccessControl.
+ Kind string `json:"kind,omitempty"`
+
+ // ProjectTeam: The project team associated with the entity, if any.
+ ProjectTeam *BucketAccessControlProjectTeam `json:"projectTeam,omitempty"`
+
+ // Role: The access permission for the entity. Can be READER, WRITER, or
+ // OWNER.
+ Role string `json:"role,omitempty"`
+
+ // SelfLink: The link to this access-control entry.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Bucket") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BucketAccessControl) MarshalJSON() ([]byte, error) {
+ type noMethod BucketAccessControl
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
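+
+// A sketch of an access-control entry using one of the entity forms
+// listed above, granting READER access to a group (the address is
+// illustrative):
+//
+//   acl := &storage.BucketAccessControl{
+//       Entity: "group-example@googlegroups.com",
+//       Role:   "READER",
+//   }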
+
+// BucketAccessControlProjectTeam: The project team associated with the
+// entity, if any.
+type BucketAccessControlProjectTeam struct {
+ // ProjectNumber: The project number.
+ ProjectNumber string `json:"projectNumber,omitempty"`
+
+ // Team: The team. Can be owners, editors, or viewers.
+ Team string `json:"team,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "ProjectNumber") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BucketAccessControlProjectTeam) MarshalJSON() ([]byte, error) {
+ type noMethod BucketAccessControlProjectTeam
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// BucketAccessControls: An access-control list.
+type BucketAccessControls struct {
+ // Items: The list of items.
+ Items []*BucketAccessControl `json:"items,omitempty"`
+
+ // Kind: The kind of item this is. For lists of bucket access control
+ // entries, this is always storage#bucketAccessControls.
+ Kind string `json:"kind,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Items") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *BucketAccessControls) MarshalJSON() ([]byte, error) {
+ type noMethod BucketAccessControls
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Buckets: A list of buckets.
+type Buckets struct {
+ // Items: The list of items.
+ Items []*Bucket `json:"items,omitempty"`
+
+ // Kind: The kind of item this is. For lists of buckets, this is always
+ // storage#buckets.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: The continuation token, used to page through large
+ // result sets. Provide this value in a subsequent request to return the
+ // next page of results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Items") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Buckets) MarshalJSON() ([]byte, error) {
+ type noMethod Buckets
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
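+
+// A pagination sketch using NextPageToken with the generated
+// Buckets.List call defined later in this file ("my-project" is
+// illustrative):
+//
+//   token := ""
+//   for {
+//       call := svc.Buckets.List("my-project")
+//       if token != "" {
+//           call = call.PageToken(token)
+//       }
+//       page, err := call.Do()
+//       if err != nil {
+//           log.Fatal(err)
+//       }
+//       // ... consume page.Items ...
+//       if token = page.NextPageToken; token == "" {
+//           break
+//       }
+//   }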
+
+// Channel: A notification channel used to watch for resource changes.
+type Channel struct {
+ // Address: The address where notifications are delivered for this
+ // channel.
+ Address string `json:"address,omitempty"`
+
+ // Expiration: Date and time of notification channel expiration,
+ // expressed as a Unix timestamp, in milliseconds. Optional.
+ Expiration int64 `json:"expiration,omitempty,string"`
+
+ // Id: A UUID or similar unique string that identifies this channel.
+ Id string `json:"id,omitempty"`
+
+ // Kind: Identifies this as a notification channel used to watch for
+ // changes to a resource. Value: the fixed string "api#channel".
+ Kind string `json:"kind,omitempty"`
+
+ // Params: Additional parameters controlling delivery channel behavior.
+ // Optional.
+ Params map[string]string `json:"params,omitempty"`
+
+ // Payload: A Boolean value to indicate whether payload is wanted.
+ // Optional.
+ Payload bool `json:"payload,omitempty"`
+
+ // ResourceId: An opaque ID that identifies the resource being watched
+ // on this channel. Stable across different API versions.
+ ResourceId string `json:"resourceId,omitempty"`
+
+ // ResourceUri: A version-specific identifier for the watched resource.
+ ResourceUri string `json:"resourceUri,omitempty"`
+
+ // Token: An arbitrary string delivered to the target address with each
+ // notification delivered over this channel. Optional.
+ Token string `json:"token,omitempty"`
+
+ // Type: The type of delivery mechanism used for this channel.
+ Type string `json:"type,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Address") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Channel) MarshalJSON() ([]byte, error) {
+ type noMethod Channel
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
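+
+// A sketch of the Channel a caller passes to a watch request such as
+// storage.objects.watchAll (the id and address are illustrative):
+//
+//   ch := &storage.Channel{
+//       Id:      "01234567-89ab-cdef-0123-456789abcdef", // caller-chosen UUID
+//       Type:    "web_hook",
+//       Address: "https://example.com/notifications",
+//   }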
+
+// ComposeRequest: A Compose request.
+type ComposeRequest struct {
+ // Destination: Properties of the resulting object.
+ Destination *Object `json:"destination,omitempty"`
+
+ // Kind: The kind of item this is.
+ Kind string `json:"kind,omitempty"`
+
+ // SourceObjects: The list of source objects that will be concatenated
+ // into a single object.
+ SourceObjects []*ComposeRequestSourceObjects `json:"sourceObjects,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Destination") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ComposeRequest) MarshalJSON() ([]byte, error) {
+ type noMethod ComposeRequest
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+type ComposeRequestSourceObjects struct {
+ // Generation: The generation of this object to use as the source.
+ Generation int64 `json:"generation,omitempty,string"`
+
+ // Name: The source object's name. The source object's bucket is
+ // implicitly the destination bucket.
+ Name string `json:"name,omitempty"`
+
+ // ObjectPreconditions: Conditions that must be met for this operation
+ // to execute.
+ ObjectPreconditions *ComposeRequestSourceObjectsObjectPreconditions `json:"objectPreconditions,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Generation") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ComposeRequestSourceObjects) MarshalJSON() ([]byte, error) {
+ type noMethod ComposeRequestSourceObjects
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ComposeRequestSourceObjectsObjectPreconditions: Conditions that must
+// be met for this operation to execute.
+type ComposeRequestSourceObjectsObjectPreconditions struct {
+ // IfGenerationMatch: Only perform the composition if the generation of
+ // the source object that would be used matches this value. If this
+ // value and a generation are both specified, they must be the same
+ // value or the call will fail.
+ IfGenerationMatch int64 `json:"ifGenerationMatch,omitempty,string"`
+
+ // ForceSendFields is a list of field names (e.g. "IfGenerationMatch")
+ // to unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ComposeRequestSourceObjectsObjectPreconditions) MarshalJSON() ([]byte, error) {
+ type noMethod ComposeRequestSourceObjectsObjectPreconditions
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
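+
+// A sketch of a full ComposeRequest that concatenates two source objects
+// into one destination, with a generation precondition on the first
+// source (names and the generation value are illustrative):
+//
+//   req := &storage.ComposeRequest{
+//       Destination: &storage.Object{Name: "combined.log", ContentType: "text/plain"},
+//       SourceObjects: []*storage.ComposeRequestSourceObjects{
+//           {
+//               Name: "part-1.log",
+//               ObjectPreconditions: &storage.ComposeRequestSourceObjectsObjectPreconditions{
+//                   IfGenerationMatch: 1234567890,
+//               },
+//           },
+//           {Name: "part-2.log"},
+//       },
+//   }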
+
+// Object: An object.
+type Object struct {
+ // Acl: Access controls on the object.
+ Acl []*ObjectAccessControl `json:"acl,omitempty"`
+
+ // Bucket: The name of the bucket containing this object.
+ Bucket string `json:"bucket,omitempty"`
+
+ // CacheControl: Cache-Control directive for the object data.
+ CacheControl string `json:"cacheControl,omitempty"`
+
+ // ComponentCount: Number of underlying components that make up this
+ // object. Components are accumulated by compose operations.
+ ComponentCount int64 `json:"componentCount,omitempty"`
+
+ // ContentDisposition: Content-Disposition of the object data.
+ ContentDisposition string `json:"contentDisposition,omitempty"`
+
+ // ContentEncoding: Content-Encoding of the object data.
+ ContentEncoding string `json:"contentEncoding,omitempty"`
+
+ // ContentLanguage: Content-Language of the object data.
+ ContentLanguage string `json:"contentLanguage,omitempty"`
+
+ // ContentType: Content-Type of the object data. If contentType is not
+ // specified, object downloads will be served as
+ // application/octet-stream.
+ ContentType string `json:"contentType,omitempty"`
+
+ // Crc32c: CRC32c checksum, as described in RFC 4960, Appendix B;
+ // encoded using base64 in big-endian byte order. For more information
+ // about using the CRC32c checksum, see Hashes and ETags: Best
+ // Practices.
+ Crc32c string `json:"crc32c,omitempty"`
+
+ // CustomerEncryption: Metadata of customer-supplied encryption key, if
+ // the object is encrypted by such a key.
+ CustomerEncryption *ObjectCustomerEncryption `json:"customerEncryption,omitempty"`
+
+ // Etag: HTTP 1.1 Entity tag for the object.
+ Etag string `json:"etag,omitempty"`
+
+ // Generation: The content generation of this object. Used for object
+ // versioning.
+ Generation int64 `json:"generation,omitempty,string"`
+
+ // Id: The ID of the object.
+ Id string `json:"id,omitempty"`
+
+ // Kind: The kind of item this is. For objects, this is always
+ // storage#object.
+ Kind string `json:"kind,omitempty"`
+
+ // Md5Hash: MD5 hash of the data; encoded using base64. For more
+ // information about using the MD5 hash, see Hashes and ETags: Best
+ // Practices.
+ Md5Hash string `json:"md5Hash,omitempty"`
+
+ // MediaLink: Media download link.
+ MediaLink string `json:"mediaLink,omitempty"`
+
+ // Metadata: User-provided metadata, in key/value pairs.
+ Metadata map[string]string `json:"metadata,omitempty"`
+
+ // Metageneration: The version of the metadata for this object at this
+ // generation. Used for preconditions and for detecting changes in
+ // metadata. A metageneration number is only meaningful in the context
+ // of a particular generation of a particular object.
+ Metageneration int64 `json:"metageneration,omitempty,string"`
+
+ // Name: The name of this object. Required if not specified by URL
+ // parameter.
+ Name string `json:"name,omitempty"`
+
+ // Owner: The owner of the object. This will always be the uploader of
+ // the object.
+ Owner *ObjectOwner `json:"owner,omitempty"`
+
+ // SelfLink: The link to this object.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // Size: Content-Length of the data in bytes.
+ Size uint64 `json:"size,omitempty,string"`
+
+ // StorageClass: Storage class of the object.
+ StorageClass string `json:"storageClass,omitempty"`
+
+ // TimeCreated: The creation time of the object in RFC 3339 format.
+ TimeCreated string `json:"timeCreated,omitempty"`
+
+ // TimeDeleted: The deletion time of the object in RFC 3339 format. Will
+ // be returned if and only if this version of the object has been
+ // deleted.
+ TimeDeleted string `json:"timeDeleted,omitempty"`
+
+ // Updated: The modification time of the object metadata in RFC 3339
+ // format.
+ Updated string `json:"updated,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Acl") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Object) MarshalJSON() ([]byte, error) {
+ type noMethod Object
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
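+
+// forceSendSketch is a hedged sketch of the ForceSendFields mechanism
+// documented above: CacheControl is empty, so omitempty would normally drop
+// it, but naming it in ForceSendFields keeps it in the payload, which is how
+// a Patch request can clear a previously set value.
+func forceSendSketch() ([]byte, error) {
+	o := &Object{
+		CacheControl:    "",
+		ForceSendFields: []string{"CacheControl"},
+	}
+	return o.MarshalJSON()
+}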
+
+// ObjectCustomerEncryption: Metadata of customer-supplied encryption
+// key, if the object is encrypted by such a key.
+type ObjectCustomerEncryption struct {
+ // EncryptionAlgorithm: The encryption algorithm.
+ EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"`
+
+ // KeySha256: SHA256 hash value of the encryption key.
+ KeySha256 string `json:"keySha256,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "EncryptionAlgorithm")
+ // to unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ObjectCustomerEncryption) MarshalJSON() ([]byte, error) {
+ type noMethod ObjectCustomerEncryption
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ObjectOwner: The owner of the object. This will always be the
+// uploader of the object.
+type ObjectOwner struct {
+ // Entity: The entity, in the form user-userId.
+ Entity string `json:"entity,omitempty"`
+
+ // EntityId: The ID for the entity.
+ EntityId string `json:"entityId,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "Entity") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ObjectOwner) MarshalJSON() ([]byte, error) {
+ type noMethod ObjectOwner
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ObjectAccessControl: An access-control entry.
+type ObjectAccessControl struct {
+ // Bucket: The name of the bucket.
+ Bucket string `json:"bucket,omitempty"`
+
+ // Domain: The domain associated with the entity, if any.
+ Domain string `json:"domain,omitempty"`
+
+ // Email: The email address associated with the entity, if any.
+ Email string `json:"email,omitempty"`
+
+ // Entity: The entity holding the permission, in one of the following
+ // forms:
+ // - user-userId
+ // - user-email
+ // - group-groupId
+ // - group-email
+ // - domain-domain
+ // - project-team-projectId
+ // - allUsers
+	// - allAuthenticatedUsers
+	// Examples:
+ // - The user liz@example.com would be user-liz@example.com.
+ // - The group example@googlegroups.com would be
+ // group-example@googlegroups.com.
+ // - To refer to all members of the Google Apps for Business domain
+ // example.com, the entity would be domain-example.com.
+ Entity string `json:"entity,omitempty"`
+
+ // EntityId: The ID for the entity, if any.
+ EntityId string `json:"entityId,omitempty"`
+
+ // Etag: HTTP 1.1 Entity tag for the access-control entry.
+ Etag string `json:"etag,omitempty"`
+
+ // Generation: The content generation of the object.
+ Generation int64 `json:"generation,omitempty,string"`
+
+ // Id: The ID of the access-control entry.
+ Id string `json:"id,omitempty"`
+
+ // Kind: The kind of item this is. For object access control entries,
+ // this is always storage#objectAccessControl.
+ Kind string `json:"kind,omitempty"`
+
+ // Object: The name of the object.
+ Object string `json:"object,omitempty"`
+
+ // ProjectTeam: The project team associated with the entity, if any.
+ ProjectTeam *ObjectAccessControlProjectTeam `json:"projectTeam,omitempty"`
+
+ // Role: The access permission for the entity. Can be READER or OWNER.
+ Role string `json:"role,omitempty"`
+
+ // SelfLink: The link to this access-control entry.
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Bucket") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ObjectAccessControl) MarshalJSON() ([]byte, error) {
+ type noMethod ObjectAccessControl
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
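+
+// aclEntitySketch is a hedged sketch of the entity string forms listed in
+// the Entity documentation above; the addresses and domain are hypothetical.
+func aclEntitySketch() []*ObjectAccessControl {
+	return []*ObjectAccessControl{
+		{Entity: "user-liz@example.com", Role: "READER"},
+		{Entity: "group-example@googlegroups.com", Role: "READER"},
+		{Entity: "domain-example.com", Role: "READER"},
+		{Entity: "allAuthenticatedUsers", Role: "READER"},
+	}
+}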
+
+// ObjectAccessControlProjectTeam: The project team associated with the
+// entity, if any.
+type ObjectAccessControlProjectTeam struct {
+ // ProjectNumber: The project number.
+ ProjectNumber string `json:"projectNumber,omitempty"`
+
+ // Team: The team. Can be owners, editors, or viewers.
+ Team string `json:"team,omitempty"`
+
+ // ForceSendFields is a list of field names (e.g. "ProjectNumber") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ObjectAccessControlProjectTeam) MarshalJSON() ([]byte, error) {
+ type noMethod ObjectAccessControlProjectTeam
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// ObjectAccessControls: An access-control list.
+type ObjectAccessControls struct {
+ // Items: The list of items.
+ Items []interface{} `json:"items,omitempty"`
+
+ // Kind: The kind of item this is. For lists of object access control
+ // entries, this is always storage#objectAccessControls.
+ Kind string `json:"kind,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Items") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *ObjectAccessControls) MarshalJSON() ([]byte, error) {
+ type noMethod ObjectAccessControls
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
+
+// Objects: A list of objects.
+type Objects struct {
+ // Items: The list of items.
+ Items []*Object `json:"items,omitempty"`
+
+ // Kind: The kind of item this is. For lists of objects, this is always
+ // storage#objects.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: The continuation token, used to page through large
+ // result sets. Provide this value in a subsequent request to return the
+ // next page of results.
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// Prefixes: The list of prefixes of objects that matched the request
+	// but were rolled up under the requested delimiter rather than listed
+	// individually; each prefix runs up to and including the delimiter.
+ Prefixes []string `json:"prefixes,omitempty"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Items") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *Objects) MarshalJSON() ([]byte, error) {
+ type noMethod Objects
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
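+
+// objectsPagingSketch is a hedged sketch of NextPageToken paging: request a
+// page, collect its items, and repeat with the returned token until it comes
+// back empty. fetch is a hypothetical stand-in for whatever issues the list
+// request with the given page token.
+func objectsPagingSketch(fetch func(pageToken string) (*Objects, error)) ([]*Object, error) {
+	var all []*Object
+	token := ""
+	for {
+		page, err := fetch(token)
+		if err != nil {
+			return nil, err
+		}
+		all = append(all, page.Items...)
+		if page.NextPageToken == "" {
+			return all, nil
+		}
+		token = page.NextPageToken
+	}
+}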
+
+// RewriteResponse: A rewrite response.
+type RewriteResponse struct {
+	// Done: true if the copy is finished; false while the copy is still in
+	// progress. This property is always present in the response.
+ Done bool `json:"done,omitempty"`
+
+ // Kind: The kind of item this is.
+ Kind string `json:"kind,omitempty"`
+
+ // ObjectSize: The total size of the object being copied in bytes. This
+ // property is always present in the response.
+ ObjectSize uint64 `json:"objectSize,omitempty,string"`
+
+ // Resource: A resource containing the metadata for the copied-to
+ // object. This property is present in the response only when copying
+ // completes.
+ Resource *Object `json:"resource,omitempty"`
+
+ // RewriteToken: A token to use in subsequent requests to continue
+ // copying data. This token is present in the response only when there
+ // is more data to copy.
+ RewriteToken string `json:"rewriteToken,omitempty"`
+
+ // TotalBytesRewritten: The total bytes written so far, which can be
+ // used to provide a waiting user with a progress indicator. This
+ // property is always present in the response.
+ TotalBytesRewritten uint64 `json:"totalBytesRewritten,omitempty,string"`
+
+ // ServerResponse contains the HTTP response code and headers from the
+ // server.
+ googleapi.ServerResponse `json:"-"`
+
+ // ForceSendFields is a list of field names (e.g. "Done") to
+ // unconditionally include in API requests. By default, fields with
+ // empty values are omitted from API requests. However, any non-pointer,
+ // non-interface field appearing in ForceSendFields will be sent to the
+ // server regardless of whether the field is empty or not. This may be
+ // used to include empty fields in Patch requests.
+ ForceSendFields []string `json:"-"`
+}
+
+func (s *RewriteResponse) MarshalJSON() ([]byte, error) {
+ type noMethod RewriteResponse
+ raw := noMethod(*s)
+ return gensupport.MarshalJSON(raw, s.ForceSendFields)
+}
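+
+// rewriteLoopSketch is a hedged sketch of the rewrite protocol described
+// above: re-issue the call with the returned RewriteToken until Done is
+// true, then read the copied object's metadata from Resource. step is a
+// hypothetical stand-in for the rewrite call itself.
+func rewriteLoopSketch(step func(rewriteToken string) (*RewriteResponse, error)) (*Object, error) {
+	token := ""
+	for {
+		resp, err := step(token)
+		if err != nil {
+			return nil, err
+		}
+		if resp.Done {
+			return resp.Resource, nil
+		}
+		token = resp.RewriteToken
+	}
+}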
+
+// method id "storage.bucketAccessControls.delete":
+
+type BucketAccessControlsDeleteCall struct {
+ s *Service
+ bucket string
+ entity string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Delete: Permanently deletes the ACL entry for the specified entity on
+// the specified bucket.
+func (r *BucketAccessControlsService) Delete(bucket string, entity string) *BucketAccessControlsDeleteCall {
+ c := &BucketAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.entity = entity
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketAccessControlsDeleteCall) Fields(s ...googleapi.Field) *BucketAccessControlsDeleteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketAccessControlsDeleteCall) Context(ctx context.Context) *BucketAccessControlsDeleteCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "entity": c.entity,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.bucketAccessControls.delete" call.
+func (c *BucketAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Permanently deletes the ACL entry for the specified entity on the specified bucket.",
+ // "httpMethod": "DELETE",
+ // "id": "storage.bucketAccessControls.delete",
+ // "parameterOrder": [
+ // "bucket",
+ // "entity"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "entity": {
+ // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/acl/{entity}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
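+
+// bucketACLDeleteSketch is a hedged usage sketch: build the call, attach a
+// context so the request can be canceled, and execute it with Do. The
+// bucket name and entity are hypothetical.
+func bucketACLDeleteSketch(ctx context.Context, svc *BucketAccessControlsService) error {
+	return svc.Delete("my-bucket", "allUsers").Context(ctx).Do()
+}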
+
+// method id "storage.bucketAccessControls.get":
+
+type BucketAccessControlsGetCall struct {
+ s *Service
+ bucket string
+ entity string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the ACL entry for the specified entity on the specified
+// bucket.
+func (r *BucketAccessControlsService) Get(bucket string, entity string) *BucketAccessControlsGetCall {
+ c := &BucketAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.entity = entity
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketAccessControlsGetCall) Fields(s ...googleapi.Field) *BucketAccessControlsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketAccessControlsGetCall) IfNoneMatch(entityTag string) *BucketAccessControlsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketAccessControlsGetCall) Context(ctx context.Context) *BucketAccessControlsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "entity": c.entity,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.bucketAccessControls.get" call.
+// Exactly one of *BucketAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *BucketAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *BucketAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &BucketAccessControl{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns the ACL entry for the specified entity on the specified bucket.",
+ // "httpMethod": "GET",
+ // "id": "storage.bucketAccessControls.get",
+ // "parameterOrder": [
+ // "bucket",
+ // "entity"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "entity": {
+ // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/acl/{entity}",
+ // "response": {
+ // "$ref": "BucketAccessControl"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
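+
+// bucketACLGetSketch is a hedged sketch of a conditional read: pass a cached
+// ETag via IfNoneMatch and treat http.StatusNotModified (reported through
+// googleapi.IsNotModified) as "the cached copy is still current". The bucket
+// name and entity are hypothetical.
+func bucketACLGetSketch(svc *BucketAccessControlsService, cachedEtag string) (*BucketAccessControl, bool, error) {
+	acl, err := svc.Get("my-bucket", "allUsers").IfNoneMatch(cachedEtag).Do()
+	if googleapi.IsNotModified(err) {
+		return nil, false, nil
+	}
+	if err != nil {
+		return nil, false, err
+	}
+	return acl, true, nil
+}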
+
+// method id "storage.bucketAccessControls.insert":
+
+type BucketAccessControlsInsertCall struct {
+ s *Service
+ bucket string
+ bucketaccesscontrol *BucketAccessControl
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Insert: Creates a new ACL entry on the specified bucket.
+func (r *BucketAccessControlsService) Insert(bucket string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsInsertCall {
+ c := &BucketAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.bucketaccesscontrol = bucketaccesscontrol
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketAccessControlsInsertCall) Fields(s ...googleapi.Field) *BucketAccessControlsInsertCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketAccessControlsInsertCall) Context(ctx context.Context) *BucketAccessControlsInsertCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.bucketAccessControls.insert" call.
+// Exactly one of *BucketAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *BucketAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *BucketAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &BucketAccessControl{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a new ACL entry on the specified bucket.",
+ // "httpMethod": "POST",
+ // "id": "storage.bucketAccessControls.insert",
+ // "parameterOrder": [
+ // "bucket"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/acl",
+ // "request": {
+ // "$ref": "BucketAccessControl"
+ // },
+ // "response": {
+ // "$ref": "BucketAccessControl"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
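+
+// bucketACLInsertSketch is a hedged sketch granting READER access to all
+// authenticated users on a hypothetical bucket; Entity and Role are assumed
+// to be fields of BucketAccessControl, mirroring ObjectAccessControl above.
+func bucketACLInsertSketch(svc *BucketAccessControlsService) (*BucketAccessControl, error) {
+	return svc.Insert("my-bucket", &BucketAccessControl{
+		Entity: "allAuthenticatedUsers",
+		Role:   "READER",
+	}).Do()
+}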
+
+// method id "storage.bucketAccessControls.list":
+
+type BucketAccessControlsListCall struct {
+ s *Service
+ bucket string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Retrieves ACL entries on the specified bucket.
+func (r *BucketAccessControlsService) List(bucket string) *BucketAccessControlsListCall {
+ c := &BucketAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketAccessControlsListCall) Fields(s ...googleapi.Field) *BucketAccessControlsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketAccessControlsListCall) IfNoneMatch(entityTag string) *BucketAccessControlsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketAccessControlsListCall) Context(ctx context.Context) *BucketAccessControlsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.bucketAccessControls.list" call.
+// Exactly one of *BucketAccessControls or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *BucketAccessControls.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *BucketAccessControlsListCall) Do(opts ...googleapi.CallOption) (*BucketAccessControls, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &BucketAccessControls{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves ACL entries on the specified bucket.",
+ // "httpMethod": "GET",
+ // "id": "storage.bucketAccessControls.list",
+ // "parameterOrder": [
+ // "bucket"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/acl",
+ // "response": {
+ // "$ref": "BucketAccessControls"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.bucketAccessControls.patch":
+
+type BucketAccessControlsPatchCall struct {
+ s *Service
+ bucket string
+ entity string
+ bucketaccesscontrol *BucketAccessControl
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Patch: Updates an ACL entry on the specified bucket. This method
+// supports patch semantics.
+func (r *BucketAccessControlsService) Patch(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsPatchCall {
+ c := &BucketAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.entity = entity
+ c.bucketaccesscontrol = bucketaccesscontrol
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketAccessControlsPatchCall) Fields(s ...googleapi.Field) *BucketAccessControlsPatchCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketAccessControlsPatchCall) Context(ctx context.Context) *BucketAccessControlsPatchCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("PATCH", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "entity": c.entity,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.bucketAccessControls.patch" call.
+// Exactly one of *BucketAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *BucketAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *BucketAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &BucketAccessControl{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates an ACL entry on the specified bucket. This method supports patch semantics.",
+ // "httpMethod": "PATCH",
+ // "id": "storage.bucketAccessControls.patch",
+ // "parameterOrder": [
+ // "bucket",
+ // "entity"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "entity": {
+ // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/acl/{entity}",
+ // "request": {
+ // "$ref": "BucketAccessControl"
+ // },
+ // "response": {
+ // "$ref": "BucketAccessControl"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
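+
+// bucketACLPatchSketch is a hedged sketch of the patch semantics noted
+// above: only the Role field is populated, so only it is sent, leaving the
+// entry's other properties untouched. The bucket name and entity are
+// hypothetical.
+func bucketACLPatchSketch(svc *BucketAccessControlsService) (*BucketAccessControl, error) {
+	return svc.Patch("my-bucket", "user-liz@example.com", &BucketAccessControl{
+		Role: "OWNER",
+	}).Do()
+}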
+
+// method id "storage.bucketAccessControls.update":
+
+type BucketAccessControlsUpdateCall struct {
+ s *Service
+ bucket string
+ entity string
+ bucketaccesscontrol *BucketAccessControl
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Update: Updates an ACL entry on the specified bucket.
+func (r *BucketAccessControlsService) Update(bucket string, entity string, bucketaccesscontrol *BucketAccessControl) *BucketAccessControlsUpdateCall {
+ c := &BucketAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.entity = entity
+ c.bucketaccesscontrol = bucketaccesscontrol
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketAccessControlsUpdateCall) Fields(s ...googleapi.Field) *BucketAccessControlsUpdateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketAccessControlsUpdateCall) Context(ctx context.Context) *BucketAccessControlsUpdateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucketaccesscontrol)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/acl/{entity}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("PUT", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "entity": c.entity,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.bucketAccessControls.update" call.
+// Exactly one of *BucketAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *BucketAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *BucketAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*BucketAccessControl, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &BucketAccessControl{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates an ACL entry on the specified bucket.",
+ // "httpMethod": "PUT",
+ // "id": "storage.bucketAccessControls.update",
+ // "parameterOrder": [
+ // "bucket",
+ // "entity"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "entity": {
+ // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/acl/{entity}",
+ // "request": {
+ // "$ref": "BucketAccessControl"
+ // },
+ // "response": {
+ // "$ref": "BucketAccessControl"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.buckets.delete":
+
+type BucketsDeleteCall struct {
+ s *Service
+ bucket string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Delete: Permanently deletes an empty bucket.
+func (r *BucketsService) Delete(bucket string) *BucketsDeleteCall {
+ c := &BucketsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": If set, only deletes the bucket if its
+// metageneration matches this value.
+func (c *BucketsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsDeleteCall {
+ c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+ return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": If set, only deletes the bucket if its
+// metageneration does not match this value.
+func (c *BucketsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsDeleteCall {
+ c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsDeleteCall) Fields(s ...googleapi.Field) *BucketsDeleteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsDeleteCall) Context(ctx context.Context) *BucketsDeleteCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.buckets.delete" call.
+func (c *BucketsDeleteCall) Do(opts ...googleapi.CallOption) error {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Permanently deletes an empty bucket.",
+ // "httpMethod": "DELETE",
+ // "id": "storage.buckets.delete",
+ // "parameterOrder": [
+ // "bucket"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "ifMetagenerationMatch": {
+ // "description": "If set, only deletes the bucket if its metageneration matches this value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationNotMatch": {
+ // "description": "If set, only deletes the bucket if its metageneration does not match this value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ]
+ // }
+
+}
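+
+// bucketDeleteSketch is a hedged sketch of a guarded delete: the bucket is
+// removed only if its metageneration still matches a value read earlier,
+// which protects against racing metadata updates. The bucket name is
+// hypothetical.
+func bucketDeleteSketch(svc *BucketsService, metageneration int64) error {
+	return svc.Delete("my-bucket").IfMetagenerationMatch(metageneration).Do()
+}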
+
+// method id "storage.buckets.get":
+
+type BucketsGetCall struct {
+ s *Service
+ bucket string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns metadata for the specified bucket.
+func (r *BucketsService) Get(bucket string) *BucketsGetCall {
+ c := &BucketsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the return of the bucket metadata
+// conditional on whether the bucket's current metageneration matches
+// the given value.
+func (c *BucketsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsGetCall {
+ c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+ return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
+// conditional on whether the bucket's current metageneration does not
+// match the given value.
+func (c *BucketsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsGetCall {
+ c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+ return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl.
+//
+// Possible values:
+// "full" - Include all properties.
+// "noAcl" - Omit owner, acl and defaultObjectAcl properties.
+func (c *BucketsGetCall) Projection(projection string) *BucketsGetCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsGetCall) Fields(s ...googleapi.Field) *BucketsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsGetCall) IfNoneMatch(entityTag string) *BucketsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsGetCall) Context(ctx context.Context) *BucketsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.buckets.get" call.
+// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Bucket.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *BucketsGetCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Bucket{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns metadata for the specified bucket.",
+ // "httpMethod": "GET",
+ // "id": "storage.buckets.get",
+ // "parameterOrder": [
+ // "bucket"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "ifMetagenerationMatch": {
+ // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationNotMatch": {
+ // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. Defaults to noAcl.",
+ // "enum": [
+ // "full",
+ // "noAcl"
+ // ],
+ // "enumDescriptions": [
+ // "Include all properties.",
+ // "Omit owner, acl and defaultObjectAcl properties."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}",
+ // "response": {
+ // "$ref": "Bucket"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_only",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ]
+ // }
+
+}
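+
+// bucketGetSketch is a hedged sketch requesting the full projection so that
+// the owner, acl and defaultObjectAcl properties are included in the
+// returned Bucket. The bucket name is hypothetical.
+func bucketGetSketch(svc *BucketsService) (*Bucket, error) {
+	return svc.Get("my-bucket").Projection("full").Do()
+}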
+
+// method id "storage.buckets.insert":
+
+type BucketsInsertCall struct {
+ s *Service
+ bucket *Bucket
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Insert: Creates a new bucket.
+func (r *BucketsService) Insert(projectid string, bucket *Bucket) *BucketsInsertCall {
+ c := &BucketsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.urlParams_.Set("project", projectid)
+ c.bucket = bucket
+ return c
+}
+
+// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
+// predefined set of access controls to this bucket.
+//
+// Possible values:
+// "authenticatedRead" - Project team owners get OWNER access, and
+// allAuthenticatedUsers get READER access.
+// "private" - Project team owners get OWNER access.
+// "projectPrivate" - Project team members get access according to
+// their roles.
+// "publicRead" - Project team owners get OWNER access, and allUsers
+// get READER access.
+// "publicReadWrite" - Project team owners get OWNER access, and
+// allUsers get WRITER access.
+func (c *BucketsInsertCall) PredefinedAcl(predefinedAcl string) *BucketsInsertCall {
+ c.urlParams_.Set("predefinedAcl", predefinedAcl)
+ return c
+}
+
+// PredefinedDefaultObjectAcl sets the optional parameter
+// "predefinedDefaultObjectAcl": Apply a predefined set of default
+// object access controls to this bucket.
+//
+// Possible values:
+// "authenticatedRead" - Object owner gets OWNER access, and
+// allAuthenticatedUsers get READER access.
+// "bucketOwnerFullControl" - Object owner gets OWNER access, and
+// project team owners get OWNER access.
+// "bucketOwnerRead" - Object owner gets OWNER access, and project
+// team owners get READER access.
+// "private" - Object owner gets OWNER access.
+// "projectPrivate" - Object owner gets OWNER access, and project team
+// members get access according to their roles.
+// "publicRead" - Object owner gets OWNER access, and allUsers get
+// READER access.
+func (c *BucketsInsertCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsInsertCall {
+ c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl)
+ return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl, unless the bucket resource
+// specifies acl or defaultObjectAcl properties, when it defaults to
+// full.
+//
+// Possible values:
+// "full" - Include all properties.
+// "noAcl" - Omit owner, acl and defaultObjectAcl properties.
+func (c *BucketsInsertCall) Projection(projection string) *BucketsInsertCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsInsertCall) Fields(s ...googleapi.Field) *BucketsInsertCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsInsertCall) Context(ctx context.Context) *BucketsInsertCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ req.Header = reqHeaders
+ googleapi.SetOpaque(req.URL)
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.buckets.insert" call.
+// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Bucket.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *BucketsInsertCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Bucket{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a new bucket.",
+ // "httpMethod": "POST",
+ // "id": "storage.buckets.insert",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "predefinedAcl": {
+ // "description": "Apply a predefined set of access controls to this bucket.",
+ // "enum": [
+ // "authenticatedRead",
+ // "private",
+ // "projectPrivate",
+ // "publicRead",
+ // "publicReadWrite"
+ // ],
+ // "enumDescriptions": [
+ // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
+ // "Project team owners get OWNER access.",
+ // "Project team members get access according to their roles.",
+ // "Project team owners get OWNER access, and allUsers get READER access.",
+ // "Project team owners get OWNER access, and allUsers get WRITER access."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "predefinedDefaultObjectAcl": {
+ // "description": "Apply a predefined set of default object access controls to this bucket.",
+ // "enum": [
+ // "authenticatedRead",
+ // "bucketOwnerFullControl",
+ // "bucketOwnerRead",
+ // "private",
+ // "projectPrivate",
+ // "publicRead"
+ // ],
+ // "enumDescriptions": [
+ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ // "Object owner gets OWNER access, and project team owners get OWNER access.",
+ // "Object owner gets OWNER access, and project team owners get READER access.",
+ // "Object owner gets OWNER access.",
+ // "Object owner gets OWNER access, and project team members get access according to their roles.",
+ // "Object owner gets OWNER access, and allUsers get READER access."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "A valid API project identifier.",
+ // "location": "query",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. Defaults to noAcl, unless the bucket resource specifies acl or defaultObjectAcl properties, when it defaults to full.",
+ // "enum": [
+ // "full",
+ // "noAcl"
+ // ],
+ // "enumDescriptions": [
+ // "Include all properties.",
+ // "Omit owner, acl and defaultObjectAcl properties."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b",
+ // "request": {
+ // "$ref": "Bucket"
+ // },
+ // "response": {
+ // "$ref": "Bucket"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ]
+ // }
+
+}
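+
+// bucketInsertSketch is a hedged sketch creating a bucket with the
+// predefined private ACL; the project ID and bucket name are hypothetical,
+// and Name is assumed to be a field of Bucket.
+func bucketInsertSketch(svc *BucketsService) (*Bucket, error) {
+	return svc.Insert("my-project", &Bucket{Name: "my-bucket"}).PredefinedAcl("private").Do()
+}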
+
+// method id "storage.buckets.list":
+
+type BucketsListCall struct {
+ s *Service
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Retrieves a list of buckets for a given project.
+func (r *BucketsService) List(projectid string) *BucketsListCall {
+ c := &BucketsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.urlParams_.Set("project", projectid)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum number
+// of buckets to return.
+func (c *BucketsListCall) MaxResults(maxResults int64) *BucketsListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": A
+// previously-returned page token representing part of the larger set of
+// results to view.
+func (c *BucketsListCall) PageToken(pageToken string) *BucketsListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Prefix sets the optional parameter "prefix": Filter results to
+// buckets whose names begin with this prefix.
+func (c *BucketsListCall) Prefix(prefix string) *BucketsListCall {
+ c.urlParams_.Set("prefix", prefix)
+ return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl.
+//
+// Possible values:
+// "full" - Include all properties.
+// "noAcl" - Omit owner, acl and defaultObjectAcl properties.
+func (c *BucketsListCall) Projection(projection string) *BucketsListCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsListCall) Fields(s ...googleapi.Field) *BucketsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *BucketsListCall) IfNoneMatch(entityTag string) *BucketsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
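+
+// A minimal sketch of a conditional listing (not generated code; svc is an
+// assumed *Service and lastETag an ETag saved from a prior response):
+//
+//   buckets, err := svc.Buckets.List("my-project").IfNoneMatch(lastETag).Do()
+//   if googleapi.IsNotModified(err) {
+//       // The listing is unchanged since lastETag; reuse the cached copy.
+//   }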
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsListCall) Context(ctx context.Context) *BucketsListCall {
+ c.ctx_ = ctx
+ return c
+}
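+
+// A sketch of bounding a call with a context (svc is an assumed *Service;
+// context here is the golang.org/x/net/context package used by this file):
+//
+//   ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+//   defer cancel()
+//   buckets, err := svc.Buckets.List("my-project").Context(ctx).Do()
+//   // If the deadline expires first, the pending HTTP request is aborted
+//   // and err reports the cancellation.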
+
+func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.SetOpaque(req.URL)
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.buckets.list" call.
+// Exactly one of *Buckets or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Buckets.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *BucketsListCall) Do(opts ...googleapi.CallOption) (*Buckets, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Buckets{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves a list of buckets for a given project.",
+ // "httpMethod": "GET",
+ // "id": "storage.buckets.list",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "maxResults": {
+ // "description": "Maximum number of buckets to return.",
+ // "format": "uint32",
+ // "location": "query",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "A previously-returned page token representing part of the larger set of results to view.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "prefix": {
+ // "description": "Filter results to buckets whose names begin with this prefix.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "A valid API project identifier.",
+ // "location": "query",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. Defaults to noAcl.",
+ // "enum": [
+ // "full",
+ // "noAcl"
+ // ],
+ // "enumDescriptions": [
+ // "Include all properties.",
+ // "Omit owner, acl and defaultObjectAcl properties."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b",
+ // "response": {
+ // "$ref": "Buckets"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_only",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ]
+ // }
+
+}
+
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
+func (c *BucketsListCall) Pages(ctx context.Context, f func(*Buckets) error) error {
+ c.ctx_ = ctx
+ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
+ for {
+ x, err := c.Do()
+ if err != nil {
+ return err
+ }
+ if err := f(x); err != nil {
+ return err
+ }
+ if x.NextPageToken == "" {
+ return nil
+ }
+ c.PageToken(x.NextPageToken)
+ }
+}
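+
+// A minimal pagination sketch (svc is an assumed *Service, ctx an assumed
+// context; Items and Name are the usual fields on Buckets and Bucket). Pages
+// drives Do repeatedly, feeding NextPageToken back in until it is empty:
+//
+//   err := svc.Buckets.List("my-project").
+//       Prefix("logs-").
+//       MaxResults(100).
+//       Pages(ctx, func(b *Buckets) error {
+//           for _, bucket := range b.Items {
+//               fmt.Println(bucket.Name)
+//           }
+//           return nil // returning a non-nil error stops the iteration
+//       })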
+
+// method id "storage.buckets.patch":
+
+type BucketsPatchCall struct {
+ s *Service
+ bucket string
+ bucket2 *Bucket
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Patch: Updates a bucket. This method supports patch semantics.
+func (r *BucketsService) Patch(bucket string, bucket2 *Bucket) *BucketsPatchCall {
+ c := &BucketsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.bucket2 = bucket2
+ return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the return of the bucket metadata
+// conditional on whether the bucket's current metageneration matches
+// the given value.
+func (c *BucketsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsPatchCall {
+ c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+ return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
+// conditional on whether the bucket's current metageneration does not
+// match the given value.
+func (c *BucketsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsPatchCall {
+ c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+ return c
+}
+
+// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
+// predefined set of access controls to this bucket.
+//
+// Possible values:
+// "authenticatedRead" - Project team owners get OWNER access, and
+// allAuthenticatedUsers get READER access.
+// "private" - Project team owners get OWNER access.
+// "projectPrivate" - Project team members get access according to
+// their roles.
+// "publicRead" - Project team owners get OWNER access, and allUsers
+// get READER access.
+// "publicReadWrite" - Project team owners get OWNER access, and
+// allUsers get WRITER access.
+func (c *BucketsPatchCall) PredefinedAcl(predefinedAcl string) *BucketsPatchCall {
+ c.urlParams_.Set("predefinedAcl", predefinedAcl)
+ return c
+}
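+
+// A sketch of patching only the bucket ACL via a predefined set (svc is an
+// assumed *Service; under patch semantics the empty Bucket body leaves the
+// other bucket properties untouched):
+//
+//   updated, err := svc.Buckets.Patch("my-bucket", &Bucket{}).
+//       PredefinedAcl("projectPrivate").
+//       Do()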
+
+// PredefinedDefaultObjectAcl sets the optional parameter
+// "predefinedDefaultObjectAcl": Apply a predefined set of default
+// object access controls to this bucket.
+//
+// Possible values:
+// "authenticatedRead" - Object owner gets OWNER access, and
+// allAuthenticatedUsers get READER access.
+// "bucketOwnerFullControl" - Object owner gets OWNER access, and
+// project team owners get OWNER access.
+// "bucketOwnerRead" - Object owner gets OWNER access, and project
+// team owners get READER access.
+// "private" - Object owner gets OWNER access.
+// "projectPrivate" - Object owner gets OWNER access, and project team
+// members get access according to their roles.
+// "publicRead" - Object owner gets OWNER access, and allUsers get
+// READER access.
+func (c *BucketsPatchCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsPatchCall {
+ c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl)
+ return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to full.
+//
+// Possible values:
+// "full" - Include all properties.
+// "noAcl" - Omit owner, acl and defaultObjectAcl properties.
+func (c *BucketsPatchCall) Projection(projection string) *BucketsPatchCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsPatchCall) Fields(s ...googleapi.Field) *BucketsPatchCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsPatchCall) Context(ctx context.Context) *BucketsPatchCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("PATCH", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.buckets.patch" call.
+// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Bucket.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *BucketsPatchCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Bucket{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates a bucket. This method supports patch semantics.",
+ // "httpMethod": "PATCH",
+ // "id": "storage.buckets.patch",
+ // "parameterOrder": [
+ // "bucket"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "ifMetagenerationMatch": {
+ // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationNotMatch": {
+ // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "predefinedAcl": {
+ // "description": "Apply a predefined set of access controls to this bucket.",
+ // "enum": [
+ // "authenticatedRead",
+ // "private",
+ // "projectPrivate",
+ // "publicRead",
+ // "publicReadWrite"
+ // ],
+ // "enumDescriptions": [
+ // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
+ // "Project team owners get OWNER access.",
+ // "Project team members get access according to their roles.",
+ // "Project team owners get OWNER access, and allUsers get READER access.",
+ // "Project team owners get OWNER access, and allUsers get WRITER access."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "predefinedDefaultObjectAcl": {
+ // "description": "Apply a predefined set of default object access controls to this bucket.",
+ // "enum": [
+ // "authenticatedRead",
+ // "bucketOwnerFullControl",
+ // "bucketOwnerRead",
+ // "private",
+ // "projectPrivate",
+ // "publicRead"
+ // ],
+ // "enumDescriptions": [
+ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ // "Object owner gets OWNER access, and project team owners get OWNER access.",
+ // "Object owner gets OWNER access, and project team owners get READER access.",
+ // "Object owner gets OWNER access.",
+ // "Object owner gets OWNER access, and project team members get access according to their roles.",
+ // "Object owner gets OWNER access, and allUsers get READER access."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. Defaults to full.",
+ // "enum": [
+ // "full",
+ // "noAcl"
+ // ],
+ // "enumDescriptions": [
+ // "Include all properties.",
+ // "Omit owner, acl and defaultObjectAcl properties."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}",
+ // "request": {
+ // "$ref": "Bucket"
+ // },
+ // "response": {
+ // "$ref": "Bucket"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.buckets.update":
+
+type BucketsUpdateCall struct {
+ s *Service
+ bucket string
+ bucket2 *Bucket
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Update: Updates a bucket.
+func (r *BucketsService) Update(bucket string, bucket2 *Bucket) *BucketsUpdateCall {
+ c := &BucketsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.bucket2 = bucket2
+ return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the return of the bucket metadata
+// conditional on whether the bucket's current metageneration matches
+// the given value.
+func (c *BucketsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *BucketsUpdateCall {
+ c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+ return c
+}
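+
+// A sketch of a read-modify-write guarded by metageneration (svc is an
+// assumed *Service; Get and the Metageneration field are the usual generated
+// surface). The update only succeeds if nothing else changed the bucket
+// metadata in between:
+//
+//   b, err := svc.Buckets.Get("my-bucket").Do()
+//   // (handle err, then modify b)
+//   _, err = svc.Buckets.Update("my-bucket", b).
+//       IfMetagenerationMatch(b.Metageneration).
+//       Do()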
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the return of the bucket metadata
+// conditional on whether the bucket's current metageneration does not
+// match the given value.
+func (c *BucketsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *BucketsUpdateCall {
+ c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+ return c
+}
+
+// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
+// predefined set of access controls to this bucket.
+//
+// Possible values:
+// "authenticatedRead" - Project team owners get OWNER access, and
+// allAuthenticatedUsers get READER access.
+// "private" - Project team owners get OWNER access.
+// "projectPrivate" - Project team members get access according to
+// their roles.
+// "publicRead" - Project team owners get OWNER access, and allUsers
+// get READER access.
+// "publicReadWrite" - Project team owners get OWNER access, and
+// allUsers get WRITER access.
+func (c *BucketsUpdateCall) PredefinedAcl(predefinedAcl string) *BucketsUpdateCall {
+ c.urlParams_.Set("predefinedAcl", predefinedAcl)
+ return c
+}
+
+// PredefinedDefaultObjectAcl sets the optional parameter
+// "predefinedDefaultObjectAcl": Apply a predefined set of default
+// object access controls to this bucket.
+//
+// Possible values:
+// "authenticatedRead" - Object owner gets OWNER access, and
+// allAuthenticatedUsers get READER access.
+// "bucketOwnerFullControl" - Object owner gets OWNER access, and
+// project team owners get OWNER access.
+// "bucketOwnerRead" - Object owner gets OWNER access, and project
+// team owners get READER access.
+// "private" - Object owner gets OWNER access.
+// "projectPrivate" - Object owner gets OWNER access, and project team
+// members get access according to their roles.
+// "publicRead" - Object owner gets OWNER access, and allUsers get
+// READER access.
+func (c *BucketsUpdateCall) PredefinedDefaultObjectAcl(predefinedDefaultObjectAcl string) *BucketsUpdateCall {
+ c.urlParams_.Set("predefinedDefaultObjectAcl", predefinedDefaultObjectAcl)
+ return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to full.
+//
+// Possible values:
+// "full" - Include all properties.
+// "noAcl" - Omit owner, acl and defaultObjectAcl properties.
+func (c *BucketsUpdateCall) Projection(projection string) *BucketsUpdateCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BucketsUpdateCall) Fields(s ...googleapi.Field) *BucketsUpdateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *BucketsUpdateCall) Context(ctx context.Context) *BucketsUpdateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.bucket2)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("PUT", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.buckets.update" call.
+// Exactly one of *Bucket or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Bucket.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *BucketsUpdateCall) Do(opts ...googleapi.CallOption) (*Bucket, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Bucket{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates a bucket.",
+ // "httpMethod": "PUT",
+ // "id": "storage.buckets.update",
+ // "parameterOrder": [
+ // "bucket"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "ifMetagenerationMatch": {
+ // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationNotMatch": {
+ // "description": "Makes the return of the bucket metadata conditional on whether the bucket's current metageneration does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "predefinedAcl": {
+ // "description": "Apply a predefined set of access controls to this bucket.",
+ // "enum": [
+ // "authenticatedRead",
+ // "private",
+ // "projectPrivate",
+ // "publicRead",
+ // "publicReadWrite"
+ // ],
+ // "enumDescriptions": [
+ // "Project team owners get OWNER access, and allAuthenticatedUsers get READER access.",
+ // "Project team owners get OWNER access.",
+ // "Project team members get access according to their roles.",
+ // "Project team owners get OWNER access, and allUsers get READER access.",
+ // "Project team owners get OWNER access, and allUsers get WRITER access."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "predefinedDefaultObjectAcl": {
+ // "description": "Apply a predefined set of default object access controls to this bucket.",
+ // "enum": [
+ // "authenticatedRead",
+ // "bucketOwnerFullControl",
+ // "bucketOwnerRead",
+ // "private",
+ // "projectPrivate",
+ // "publicRead"
+ // ],
+ // "enumDescriptions": [
+ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ // "Object owner gets OWNER access, and project team owners get OWNER access.",
+ // "Object owner gets OWNER access, and project team owners get READER access.",
+ // "Object owner gets OWNER access.",
+ // "Object owner gets OWNER access, and project team members get access according to their roles.",
+ // "Object owner gets OWNER access, and allUsers get READER access."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. Defaults to full.",
+ // "enum": [
+ // "full",
+ // "noAcl"
+ // ],
+ // "enumDescriptions": [
+ // "Include all properties.",
+ // "Omit owner, acl and defaultObjectAcl properties."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}",
+ // "request": {
+ // "$ref": "Bucket"
+ // },
+ // "response": {
+ // "$ref": "Bucket"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.channels.stop":
+
+type ChannelsStopCall struct {
+ s *Service
+ channel *Channel
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Stop: Stop watching resources through this channel.
+func (r *ChannelsService) Stop(channel *Channel) *ChannelsStopCall {
+ c := &ChannelsStopCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.channel = channel
+ return c
+}
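+
+// A sketch of stopping a watch channel (svc is an assumed *Service; Id and
+// ResourceId are the usual Channel fields returned when the watch was
+// created):
+//
+//   err := svc.Channels.Stop(&Channel{
+//       Id:         "my-channel-id",
+//       ResourceId: "resource-id-from-watch-response",
+//   }).Do()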
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ChannelsStopCall) Fields(s ...googleapi.Field) *ChannelsStopCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ChannelsStopCall) Context(ctx context.Context) *ChannelsStopCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "channels/stop")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ req.Header = reqHeaders
+ googleapi.SetOpaque(req.URL)
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.channels.stop" call.
+func (c *ChannelsStopCall) Do(opts ...googleapi.CallOption) error {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Stop watching resources through this channel",
+ // "httpMethod": "POST",
+ // "id": "storage.channels.stop",
+ // "path": "channels/stop",
+ // "request": {
+ // "$ref": "Channel",
+ // "parameterName": "resource"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_only",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ]
+ // }
+
+}
+
+// method id "storage.defaultObjectAccessControls.delete":
+
+type DefaultObjectAccessControlsDeleteCall struct {
+ s *Service
+ bucket string
+ entity string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Delete: Permanently deletes the default object ACL entry for the
+// specified entity on the specified bucket.
+func (r *DefaultObjectAccessControlsService) Delete(bucket string, entity string) *DefaultObjectAccessControlsDeleteCall {
+ c := &DefaultObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.entity = entity
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DefaultObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsDeleteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DefaultObjectAccessControlsDeleteCall) Context(ctx context.Context) *DefaultObjectAccessControlsDeleteCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "entity": c.entity,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.defaultObjectAccessControls.delete" call.
+func (c *DefaultObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Permanently deletes the default object ACL entry for the specified entity on the specified bucket.",
+ // "httpMethod": "DELETE",
+ // "id": "storage.defaultObjectAccessControls.delete",
+ // "parameterOrder": [
+ // "bucket",
+ // "entity"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "entity": {
+ // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/defaultObjectAcl/{entity}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.defaultObjectAccessControls.get":
+
+type DefaultObjectAccessControlsGetCall struct {
+ s *Service
+ bucket string
+ entity string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the default object ACL entry for the specified entity on
+// the specified bucket.
+func (r *DefaultObjectAccessControlsService) Get(bucket string, entity string) *DefaultObjectAccessControlsGetCall {
+ c := &DefaultObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.entity = entity
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DefaultObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DefaultObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DefaultObjectAccessControlsGetCall) Context(ctx context.Context) *DefaultObjectAccessControlsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "entity": c.entity,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.defaultObjectAccessControls.get" call.
+// Exactly one of *ObjectAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *DefaultObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ObjectAccessControl{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns the default object ACL entry for the specified entity on the specified bucket.",
+ // "httpMethod": "GET",
+ // "id": "storage.defaultObjectAccessControls.get",
+ // "parameterOrder": [
+ // "bucket",
+ // "entity"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "entity": {
+ // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/defaultObjectAcl/{entity}",
+ // "response": {
+ // "$ref": "ObjectAccessControl"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.defaultObjectAccessControls.insert":
+
+type DefaultObjectAccessControlsInsertCall struct {
+ s *Service
+ bucket string
+ objectaccesscontrol *ObjectAccessControl
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Insert: Creates a new default object ACL entry on the specified
+// bucket.
+func (r *DefaultObjectAccessControlsService) Insert(bucket string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsInsertCall {
+ c := &DefaultObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.objectaccesscontrol = objectaccesscontrol
+ return c
+}
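+
+// A sketch of adding a default object ACL entry (svc is an assumed *Service;
+// Entity and Role are the usual ObjectAccessControl fields). New objects in
+// the bucket then inherit this entry:
+//
+//   acl, err := svc.DefaultObjectAccessControls.Insert("my-bucket",
+//       &ObjectAccessControl{Entity: "allUsers", Role: "READER"}).Do()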
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DefaultObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsInsertCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DefaultObjectAccessControlsInsertCall) Context(ctx context.Context) *DefaultObjectAccessControlsInsertCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.defaultObjectAccessControls.insert" call.
+// Exactly one of *ObjectAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *DefaultObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ObjectAccessControl{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a new default object ACL entry on the specified bucket.",
+ // "httpMethod": "POST",
+ // "id": "storage.defaultObjectAccessControls.insert",
+ // "parameterOrder": [
+ // "bucket"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/defaultObjectAcl",
+ // "request": {
+ // "$ref": "ObjectAccessControl"
+ // },
+ // "response": {
+ // "$ref": "ObjectAccessControl"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.defaultObjectAccessControls.list":
+
+type DefaultObjectAccessControlsListCall struct {
+ s *Service
+ bucket string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Retrieves default object ACL entries on the specified bucket.
+func (r *DefaultObjectAccessControlsService) List(bucket string) *DefaultObjectAccessControlsListCall {
+ c := &DefaultObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": If present, only return default ACL listing
+// if the bucket's current metageneration matches this value.
+func (c *DefaultObjectAccessControlsListCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *DefaultObjectAccessControlsListCall {
+ c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+ return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": If present, only return default ACL
+// listing if the bucket's current metageneration does not match the
+// given value.
+func (c *DefaultObjectAccessControlsListCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *DefaultObjectAccessControlsListCall {
+ c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DefaultObjectAccessControlsListCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *DefaultObjectAccessControlsListCall) IfNoneMatch(entityTag string) *DefaultObjectAccessControlsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DefaultObjectAccessControlsListCall) Context(ctx context.Context) *DefaultObjectAccessControlsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.defaultObjectAccessControls.list" call.
+// Exactly one of *ObjectAccessControls or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControls.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *DefaultObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ObjectAccessControls{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves default object ACL entries on the specified bucket.",
+ // "httpMethod": "GET",
+ // "id": "storage.defaultObjectAccessControls.list",
+ // "parameterOrder": [
+ // "bucket"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "ifMetagenerationMatch": {
+ // "description": "If present, only return default ACL listing if the bucket's current metageneration matches this value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationNotMatch": {
+ // "description": "If present, only return default ACL listing if the bucket's current metageneration does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/defaultObjectAcl",
+ // "response": {
+ // "$ref": "ObjectAccessControls"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.defaultObjectAccessControls.patch":
+
+type DefaultObjectAccessControlsPatchCall struct {
+ s *Service
+ bucket string
+ entity string
+ objectaccesscontrol *ObjectAccessControl
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Patch: Updates a default object ACL entry on the specified bucket.
+// This method supports patch semantics.
+func (r *DefaultObjectAccessControlsService) Patch(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsPatchCall {
+ c := &DefaultObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.entity = entity
+ c.objectaccesscontrol = objectaccesscontrol
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DefaultObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsPatchCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DefaultObjectAccessControlsPatchCall) Context(ctx context.Context) *DefaultObjectAccessControlsPatchCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("PATCH", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "entity": c.entity,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.defaultObjectAccessControls.patch" call.
+// Exactly one of *ObjectAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *DefaultObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ObjectAccessControl{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates a default object ACL entry on the specified bucket. This method supports patch semantics.",
+ // "httpMethod": "PATCH",
+ // "id": "storage.defaultObjectAccessControls.patch",
+ // "parameterOrder": [
+ // "bucket",
+ // "entity"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "entity": {
+ // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/defaultObjectAcl/{entity}",
+ // "request": {
+ // "$ref": "ObjectAccessControl"
+ // },
+ // "response": {
+ // "$ref": "ObjectAccessControl"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.defaultObjectAccessControls.update":
+
+type DefaultObjectAccessControlsUpdateCall struct {
+ s *Service
+ bucket string
+ entity string
+ objectaccesscontrol *ObjectAccessControl
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Update: Updates a default object ACL entry on the specified bucket.
+func (r *DefaultObjectAccessControlsService) Update(bucket string, entity string, objectaccesscontrol *ObjectAccessControl) *DefaultObjectAccessControlsUpdateCall {
+ c := &DefaultObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.entity = entity
+ c.objectaccesscontrol = objectaccesscontrol
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DefaultObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *DefaultObjectAccessControlsUpdateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *DefaultObjectAccessControlsUpdateCall) Context(ctx context.Context) *DefaultObjectAccessControlsUpdateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/defaultObjectAcl/{entity}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("PUT", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "entity": c.entity,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.defaultObjectAccessControls.update" call.
+// Exactly one of *ObjectAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *DefaultObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ObjectAccessControl{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates a default object ACL entry on the specified bucket.",
+ // "httpMethod": "PUT",
+ // "id": "storage.defaultObjectAccessControls.update",
+ // "parameterOrder": [
+ // "bucket",
+ // "entity"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "entity": {
+ // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/defaultObjectAcl/{entity}",
+ // "request": {
+ // "$ref": "ObjectAccessControl"
+ // },
+ // "response": {
+ // "$ref": "ObjectAccessControl"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.objectAccessControls.delete":
+
+type ObjectAccessControlsDeleteCall struct {
+ s *Service
+ bucket string
+ object string
+ entity string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Delete: Permanently deletes the ACL entry for the specified entity on
+// the specified object.
+func (r *ObjectAccessControlsService) Delete(bucket string, object string, entity string) *ObjectAccessControlsDeleteCall {
+ c := &ObjectAccessControlsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.object = object
+ c.entity = entity
+ return c
+}
+
+// Generation sets the optional parameter "generation": If present,
+// selects a specific revision of this object (as opposed to the latest
+// version, the default).
+func (c *ObjectAccessControlsDeleteCall) Generation(generation int64) *ObjectAccessControlsDeleteCall {
+ c.urlParams_.Set("generation", fmt.Sprint(generation))
+ return c
+}
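+
+// A sketch of removing an ACL entry from one specific object revision (svc
+// is an assumed *Service; gen is a generation number from a prior object
+// metadata read):
+//
+//   err := svc.ObjectAccessControls.
+//       Delete("my-bucket", "my-object", "user-jane@example.com").
+//       Generation(gen).
+//       Do()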
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectAccessControlsDeleteCall) Fields(s ...googleapi.Field) *ObjectAccessControlsDeleteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectAccessControlsDeleteCall) Context(ctx context.Context) *ObjectAccessControlsDeleteCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "object": c.object,
+ "entity": c.entity,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.objectAccessControls.delete" call.
+func (c *ObjectAccessControlsDeleteCall) Do(opts ...googleapi.CallOption) error {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Permanently deletes the ACL entry for the specified entity on the specified object.",
+ // "httpMethod": "DELETE",
+ // "id": "storage.objectAccessControls.delete",
+ // "parameterOrder": [
+ // "bucket",
+ // "object",
+ // "entity"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "entity": {
+ // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "generation": {
+ // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "object": {
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/o/{object}/acl/{entity}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.objectAccessControls.get":
+
+type ObjectAccessControlsGetCall struct {
+ s *Service
+ bucket string
+ object string
+ entity string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Returns the ACL entry for the specified entity on the specified
+// object.
+func (r *ObjectAccessControlsService) Get(bucket string, object string, entity string) *ObjectAccessControlsGetCall {
+ c := &ObjectAccessControlsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.object = object
+ c.entity = entity
+ return c
+}
+
+// Generation sets the optional parameter "generation": If present,
+// selects a specific revision of this object (as opposed to the latest
+// version, the default).
+func (c *ObjectAccessControlsGetCall) Generation(generation int64) *ObjectAccessControlsGetCall {
+ c.urlParams_.Set("generation", fmt.Sprint(generation))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectAccessControlsGetCall) Fields(s ...googleapi.Field) *ObjectAccessControlsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectAccessControlsGetCall) IfNoneMatch(entityTag string) *ObjectAccessControlsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectAccessControlsGetCall) Context(ctx context.Context) *ObjectAccessControlsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "object": c.object,
+ "entity": c.entity,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.objectAccessControls.get" call.
+// Exactly one of *ObjectAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ObjectAccessControlsGetCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ObjectAccessControl{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns the ACL entry for the specified entity on the specified object.",
+ // "httpMethod": "GET",
+ // "id": "storage.objectAccessControls.get",
+ // "parameterOrder": [
+ // "bucket",
+ // "object",
+ // "entity"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "entity": {
+ // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "generation": {
+ // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "object": {
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/o/{object}/acl/{entity}",
+ // "response": {
+ // "$ref": "ObjectAccessControl"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
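+
+// exampleGetObjectACL is an illustrative sketch, not part of the generated
+// surface: it exercises the If-None-Match/IsNotModified contract described
+// on Do above. lastETag and the bucket/object/entity names are placeholders.
+func exampleGetObjectACL(svc *Service, lastETag string) (*ObjectAccessControl, error) {
+	acl, err := svc.ObjectAccessControls.
+		Get("my-bucket", "notes.txt", "user-jane@example.com").
+		IfNoneMatch(lastETag).
+		Do()
+	if googleapi.IsNotModified(err) {
+		// A 304 surfaced as *googleapi.Error: the cached entry is current,
+		// so the caller can keep using its previously fetched copy.
+		return nil, nil
+	}
+	return acl, err
+}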
+
+// method id "storage.objectAccessControls.insert":
+
+type ObjectAccessControlsInsertCall struct {
+ s *Service
+ bucket string
+ object string
+ objectaccesscontrol *ObjectAccessControl
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Insert: Creates a new ACL entry on the specified object.
+func (r *ObjectAccessControlsService) Insert(bucket string, object string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsInsertCall {
+ c := &ObjectAccessControlsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.object = object
+ c.objectaccesscontrol = objectaccesscontrol
+ return c
+}
+
+// Generation sets the optional parameter "generation": If present,
+// selects a specific revision of this object (as opposed to the latest
+// version, the default).
+func (c *ObjectAccessControlsInsertCall) Generation(generation int64) *ObjectAccessControlsInsertCall {
+ c.urlParams_.Set("generation", fmt.Sprint(generation))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectAccessControlsInsertCall) Fields(s ...googleapi.Field) *ObjectAccessControlsInsertCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectAccessControlsInsertCall) Context(ctx context.Context) *ObjectAccessControlsInsertCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "object": c.object,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.objectAccessControls.insert" call.
+// Exactly one of *ObjectAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ObjectAccessControlsInsertCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ObjectAccessControl{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a new ACL entry on the specified object.",
+ // "httpMethod": "POST",
+ // "id": "storage.objectAccessControls.insert",
+ // "parameterOrder": [
+ // "bucket",
+ // "object"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "generation": {
+ // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "object": {
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/o/{object}/acl",
+ // "request": {
+ // "$ref": "ObjectAccessControl"
+ // },
+ // "response": {
+ // "$ref": "ObjectAccessControl"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
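+
+// exampleInsertObjectACL is an illustrative sketch, not part of the
+// generated surface: it grants READER to allUsers on a single object,
+// using the Entity/Role fields of the ObjectAccessControl schema type
+// defined earlier in this file. Bucket and object names are placeholders.
+func exampleInsertObjectACL(svc *Service) (*ObjectAccessControl, error) {
+	acl := &ObjectAccessControl{Entity: "allUsers", Role: "READER"}
+	return svc.ObjectAccessControls.Insert("my-bucket", "notes.txt", acl).Do()
+}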
+
+// method id "storage.objectAccessControls.list":
+
+type ObjectAccessControlsListCall struct {
+ s *Service
+ bucket string
+ object string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Retrieves ACL entries on the specified object.
+func (r *ObjectAccessControlsService) List(bucket string, object string) *ObjectAccessControlsListCall {
+ c := &ObjectAccessControlsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.object = object
+ return c
+}
+
+// Generation sets the optional parameter "generation": If present,
+// selects a specific revision of this object (as opposed to the latest
+// version, the default).
+func (c *ObjectAccessControlsListCall) Generation(generation int64) *ObjectAccessControlsListCall {
+ c.urlParams_.Set("generation", fmt.Sprint(generation))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectAccessControlsListCall) Fields(s ...googleapi.Field) *ObjectAccessControlsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectAccessControlsListCall) IfNoneMatch(entityTag string) *ObjectAccessControlsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectAccessControlsListCall) Context(ctx context.Context) *ObjectAccessControlsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "object": c.object,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.objectAccessControls.list" call.
+// Exactly one of *ObjectAccessControls or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControls.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ObjectAccessControlsListCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControls, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ObjectAccessControls{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves ACL entries on the specified object.",
+ // "httpMethod": "GET",
+ // "id": "storage.objectAccessControls.list",
+ // "parameterOrder": [
+ // "bucket",
+ // "object"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "generation": {
+ // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "object": {
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/o/{object}/acl",
+ // "response": {
+ // "$ref": "ObjectAccessControls"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
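+
+// exampleListObjectACLs is an illustrative sketch, not part of the
+// generated surface: it prints every ACL entry on one object, assuming the
+// Items field of the ObjectAccessControls schema type defined earlier in
+// this file. Names are placeholders.
+func exampleListObjectACLs(svc *Service) error {
+	acls, err := svc.ObjectAccessControls.List("my-bucket", "notes.txt").Do()
+	if err != nil {
+		return err
+	}
+	for _, item := range acls.Items {
+		fmt.Printf("%s has role %s\n", item.Entity, item.Role)
+	}
+	return nil
+}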
+
+// method id "storage.objectAccessControls.patch":
+
+type ObjectAccessControlsPatchCall struct {
+ s *Service
+ bucket string
+ object string
+ entity string
+ objectaccesscontrol *ObjectAccessControl
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Patch: Updates an ACL entry on the specified object. This method
+// supports patch semantics.
+func (r *ObjectAccessControlsService) Patch(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsPatchCall {
+ c := &ObjectAccessControlsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.object = object
+ c.entity = entity
+ c.objectaccesscontrol = objectaccesscontrol
+ return c
+}
+
+// Generation sets the optional parameter "generation": If present,
+// selects a specific revision of this object (as opposed to the latest
+// version, the default).
+func (c *ObjectAccessControlsPatchCall) Generation(generation int64) *ObjectAccessControlsPatchCall {
+ c.urlParams_.Set("generation", fmt.Sprint(generation))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectAccessControlsPatchCall) Fields(s ...googleapi.Field) *ObjectAccessControlsPatchCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectAccessControlsPatchCall) Context(ctx context.Context) *ObjectAccessControlsPatchCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("PATCH", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "object": c.object,
+ "entity": c.entity,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.objectAccessControls.patch" call.
+// Exactly one of *ObjectAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ObjectAccessControlsPatchCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ObjectAccessControl{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates an ACL entry on the specified object. This method supports patch semantics.",
+ // "httpMethod": "PATCH",
+ // "id": "storage.objectAccessControls.patch",
+ // "parameterOrder": [
+ // "bucket",
+ // "object",
+ // "entity"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "entity": {
+ // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "generation": {
+ // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "object": {
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/o/{object}/acl/{entity}",
+ // "request": {
+ // "$ref": "ObjectAccessControl"
+ // },
+ // "response": {
+ // "$ref": "ObjectAccessControl"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
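+
+// examplePatchObjectACL is an illustrative sketch, not part of the
+// generated surface: patch semantics let the caller send only the field
+// being changed, here the role for an existing entity. Names are
+// placeholders.
+func examplePatchObjectACL(svc *Service) (*ObjectAccessControl, error) {
+	return svc.ObjectAccessControls.
+		Patch("my-bucket", "notes.txt", "user-jane@example.com",
+			&ObjectAccessControl{Role: "OWNER"}).
+		Do()
+}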
+
+// method id "storage.objectAccessControls.update":
+
+type ObjectAccessControlsUpdateCall struct {
+ s *Service
+ bucket string
+ object string
+ entity string
+ objectaccesscontrol *ObjectAccessControl
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Update: Updates an ACL entry on the specified object.
+func (r *ObjectAccessControlsService) Update(bucket string, object string, entity string, objectaccesscontrol *ObjectAccessControl) *ObjectAccessControlsUpdateCall {
+ c := &ObjectAccessControlsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.object = object
+ c.entity = entity
+ c.objectaccesscontrol = objectaccesscontrol
+ return c
+}
+
+// Generation sets the optional parameter "generation": If present,
+// selects a specific revision of this object (as opposed to the latest
+// version, the default).
+func (c *ObjectAccessControlsUpdateCall) Generation(generation int64) *ObjectAccessControlsUpdateCall {
+ c.urlParams_.Set("generation", fmt.Sprint(generation))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectAccessControlsUpdateCall) Fields(s ...googleapi.Field) *ObjectAccessControlsUpdateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectAccessControlsUpdateCall) Context(ctx context.Context) *ObjectAccessControlsUpdateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.objectaccesscontrol)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}/acl/{entity}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("PUT", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "object": c.object,
+ "entity": c.entity,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.objectAccessControls.update" call.
+// Exactly one of *ObjectAccessControl or error will be non-nil. Any
+// non-2xx status code is an error. Response headers are in either
+// *ObjectAccessControl.ServerResponse.Header or (if a response was
+// returned at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ObjectAccessControlsUpdateCall) Do(opts ...googleapi.CallOption) (*ObjectAccessControl, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &ObjectAccessControl{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates an ACL entry on the specified object.",
+ // "httpMethod": "PUT",
+ // "id": "storage.objectAccessControls.update",
+ // "parameterOrder": [
+ // "bucket",
+ // "object",
+ // "entity"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of a bucket.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "entity": {
+ // "description": "The entity holding the permission. Can be user-userId, user-emailAddress, group-groupId, group-emailAddress, allUsers, or allAuthenticatedUsers.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "generation": {
+ // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "object": {
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/o/{object}/acl/{entity}",
+ // "request": {
+ // "$ref": "ObjectAccessControl"
+ // },
+ // "response": {
+ // "$ref": "ObjectAccessControl"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
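+
+// exampleUpdateObjectACL is an illustrative sketch, not part of the
+// generated surface: unlike patch, update (PUT) replaces the ACL entry
+// outright, here on a specific object generation. gen and the names are
+// placeholders.
+func exampleUpdateObjectACL(svc *Service, gen int64) (*ObjectAccessControl, error) {
+	return svc.ObjectAccessControls.
+		Update("my-bucket", "notes.txt", "allAuthenticatedUsers",
+			&ObjectAccessControl{Role: "READER"}).
+		Generation(gen).
+		Do()
+}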
+
+// method id "storage.objects.compose":
+
+type ObjectsComposeCall struct {
+ s *Service
+ destinationBucket string
+ destinationObject string
+ composerequest *ComposeRequest
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Compose: Concatenates a list of existing objects into a new object in
+// the same bucket.
+func (r *ObjectsService) Compose(destinationBucket string, destinationObject string, composerequest *ComposeRequest) *ObjectsComposeCall {
+ c := &ObjectsComposeCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.destinationBucket = destinationBucket
+ c.destinationObject = destinationObject
+ c.composerequest = composerequest
+ return c
+}
+
+// DestinationPredefinedAcl sets the optional parameter
+// "destinationPredefinedAcl": Apply a predefined set of access controls
+// to the destination object.
+//
+// Possible values:
+// "authenticatedRead" - Object owner gets OWNER access, and
+// allAuthenticatedUsers get READER access.
+// "bucketOwnerFullControl" - Object owner gets OWNER access, and
+// project team owners get OWNER access.
+// "bucketOwnerRead" - Object owner gets OWNER access, and project
+// team owners get READER access.
+// "private" - Object owner gets OWNER access.
+// "projectPrivate" - Object owner gets OWNER access, and project team
+// members get access according to their roles.
+// "publicRead" - Object owner gets OWNER access, and allUsers get
+// READER access.
+func (c *ObjectsComposeCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsComposeCall {
+ c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl)
+ return c
+}
+
+// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
+// Makes the operation conditional on whether the object's current
+// generation matches the given value.
+func (c *ObjectsComposeCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsComposeCall {
+ c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
+ return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the operation conditional on whether
+// the object's current metageneration matches the given value.
+func (c *ObjectsComposeCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsComposeCall {
+ c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsComposeCall) Fields(s ...googleapi.Field) *ObjectsComposeCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do and Download
+// methods. Any pending HTTP request will be aborted if the provided
+// context is canceled.
+func (c *ObjectsComposeCall) Context(ctx context.Context) *ObjectsComposeCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.composerequest)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{destinationBucket}/o/{destinationObject}/compose")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "destinationBucket": c.destinationBucket,
+ "destinationObject": c.destinationObject,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Download fetches the API endpoint's "media" value, instead of the normal
+// API response value. If the returned error is nil, the Response is guaranteed to
+// have a 2xx status code. Callers must close the Response.Body as usual.
+func (c *ObjectsComposeCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("media")
+ if err != nil {
+ return nil, err
+ }
+ if err := googleapi.CheckMediaResponse(res); err != nil {
+ res.Body.Close()
+ return nil, err
+ }
+ return res, nil
+}
+
+// Do executes the "storage.objects.compose" call.
+// Exactly one of *Object or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Object.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ObjectsComposeCall) Do(opts ...googleapi.CallOption) (*Object, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Object{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Concatenates a list of existing objects into a new object in the same bucket.",
+ // "httpMethod": "POST",
+ // "id": "storage.objects.compose",
+ // "parameterOrder": [
+ // "destinationBucket",
+ // "destinationObject"
+ // ],
+ // "parameters": {
+ // "destinationBucket": {
+ // "description": "Name of the bucket in which to store the new object.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "destinationObject": {
+ // "description": "Name of the new object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "destinationPredefinedAcl": {
+ // "description": "Apply a predefined set of access controls to the destination object.",
+ // "enum": [
+ // "authenticatedRead",
+ // "bucketOwnerFullControl",
+ // "bucketOwnerRead",
+ // "private",
+ // "projectPrivate",
+ // "publicRead"
+ // ],
+ // "enumDescriptions": [
+ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ // "Object owner gets OWNER access, and project team owners get OWNER access.",
+ // "Object owner gets OWNER access, and project team owners get READER access.",
+ // "Object owner gets OWNER access.",
+ // "Object owner gets OWNER access, and project team members get access according to their roles.",
+ // "Object owner gets OWNER access, and allUsers get READER access."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationMatch": {
+ // "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationMatch": {
+ // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{destinationBucket}/o/{destinationObject}/compose",
+ // "request": {
+ // "$ref": "ComposeRequest"
+ // },
+ // "response": {
+ // "$ref": "Object"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ],
+ // "supportsMediaDownload": true,
+ // "useMediaDownloadService": true
+ // }
+
+}
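+
+// exampleCompose is an illustrative sketch, not part of the generated
+// surface: it concatenates two source objects into a destination object in
+// the same bucket, assuming the ComposeRequest and
+// ComposeRequestSourceObjects schema types defined earlier in this file.
+// IfGenerationMatch(0) makes the compose fail if the destination already
+// exists. All names are placeholders.
+func exampleCompose(svc *Service) (*Object, error) {
+	req := &ComposeRequest{
+		SourceObjects: []*ComposeRequestSourceObjects{
+			{Name: "part-1"},
+			{Name: "part-2"},
+		},
+	}
+	return svc.Objects.Compose("my-bucket", "merged", req).
+		IfGenerationMatch(0).
+		Do()
+}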
+
+// method id "storage.objects.copy":
+
+type ObjectsCopyCall struct {
+ s *Service
+ sourceBucket string
+ sourceObject string
+ destinationBucket string
+ destinationObject string
+ object *Object
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Copy: Copies a source object to a destination object. Optionally
+// overrides metadata.
+func (r *ObjectsService) Copy(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsCopyCall {
+ c := &ObjectsCopyCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.sourceBucket = sourceBucket
+ c.sourceObject = sourceObject
+ c.destinationBucket = destinationBucket
+ c.destinationObject = destinationObject
+ c.object = object
+ return c
+}
+
+// DestinationPredefinedAcl sets the optional parameter
+// "destinationPredefinedAcl": Apply a predefined set of access controls
+// to the destination object.
+//
+// Possible values:
+// "authenticatedRead" - Object owner gets OWNER access, and
+// allAuthenticatedUsers get READER access.
+// "bucketOwnerFullControl" - Object owner gets OWNER access, and
+// project team owners get OWNER access.
+// "bucketOwnerRead" - Object owner gets OWNER access, and project
+// team owners get READER access.
+// "private" - Object owner gets OWNER access.
+// "projectPrivate" - Object owner gets OWNER access, and project team
+// members get access according to their roles.
+// "publicRead" - Object owner gets OWNER access, and allUsers get
+// READER access.
+func (c *ObjectsCopyCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsCopyCall {
+ c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl)
+ return c
+}
+
+// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
+// Makes the operation conditional on whether the destination object's
+// current generation matches the given value.
+func (c *ObjectsCopyCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsCopyCall {
+ c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
+ return c
+}
+
+// IfGenerationNotMatch sets the optional parameter
+// "ifGenerationNotMatch": Makes the operation conditional on whether
+// the destination object's current generation does not match the given
+// value.
+func (c *ObjectsCopyCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsCopyCall {
+ c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
+ return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the operation conditional on whether
+// the destination object's current metageneration matches the given
+// value.
+func (c *ObjectsCopyCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsCopyCall {
+ c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+ return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the operation conditional on
+// whether the destination object's current metageneration does not
+// match the given value.
+func (c *ObjectsCopyCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsCopyCall {
+ c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+ return c
+}
+
+// IfSourceGenerationMatch sets the optional parameter
+// "ifSourceGenerationMatch": Makes the operation conditional on whether
+// the source object's generation matches the given value.
+func (c *ObjectsCopyCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsCopyCall {
+ c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch))
+ return c
+}
+
+// IfSourceGenerationNotMatch sets the optional parameter
+// "ifSourceGenerationNotMatch": Makes the operation conditional on
+// whether the source object's generation does not match the given
+// value.
+func (c *ObjectsCopyCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsCopyCall {
+ c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch))
+ return c
+}
+
+// IfSourceMetagenerationMatch sets the optional parameter
+// "ifSourceMetagenerationMatch": Makes the operation conditional on
+// whether the source object's current metageneration matches the given
+// value.
+func (c *ObjectsCopyCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsCopyCall {
+ c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch))
+ return c
+}
+
+// IfSourceMetagenerationNotMatch sets the optional parameter
+// "ifSourceMetagenerationNotMatch": Makes the operation conditional on
+// whether the source object's current metageneration does not match the
+// given value.
+func (c *ObjectsCopyCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsCopyCall {
+ c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch))
+ return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl, unless the object resource
+// specifies the acl property, when it defaults to full.
+//
+// Possible values:
+// "full" - Include all properties.
+// "noAcl" - Omit the owner, acl property.
+func (c *ObjectsCopyCall) Projection(projection string) *ObjectsCopyCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// SourceGeneration sets the optional parameter "sourceGeneration": If
+// present, selects a specific revision of the source object (as opposed
+// to the latest version, the default).
+func (c *ObjectsCopyCall) SourceGeneration(sourceGeneration int64) *ObjectsCopyCall {
+ c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsCopyCall) Fields(s ...googleapi.Field) *ObjectsCopyCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do and Download
+// methods. Any pending HTTP request will be aborted if the provided
+// context is canceled.
+func (c *ObjectsCopyCall) Context(ctx context.Context) *ObjectsCopyCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "sourceBucket": c.sourceBucket,
+ "sourceObject": c.sourceObject,
+ "destinationBucket": c.destinationBucket,
+ "destinationObject": c.destinationObject,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Download fetches the API endpoint's "media" value, instead of the normal
+// API response value. If the returned error is nil, the Response is guaranteed to
+// have a 2xx status code. Callers must close the Response.Body as usual.
+func (c *ObjectsCopyCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("media")
+ if err != nil {
+ return nil, err
+ }
+ if err := googleapi.CheckMediaResponse(res); err != nil {
+ res.Body.Close()
+ return nil, err
+ }
+ return res, nil
+}
+
+// Do executes the "storage.objects.copy" call.
+// Exactly one of *Object or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Object.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ObjectsCopyCall) Do(opts ...googleapi.CallOption) (*Object, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Object{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Copies a source object to a destination object. Optionally overrides metadata.",
+ // "httpMethod": "POST",
+ // "id": "storage.objects.copy",
+ // "parameterOrder": [
+ // "sourceBucket",
+ // "sourceObject",
+ // "destinationBucket",
+ // "destinationObject"
+ // ],
+ // "parameters": {
+ // "destinationBucket": {
+ // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "destinationObject": {
+ // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "destinationPredefinedAcl": {
+ // "description": "Apply a predefined set of access controls to the destination object.",
+ // "enum": [
+ // "authenticatedRead",
+ // "bucketOwnerFullControl",
+ // "bucketOwnerRead",
+ // "private",
+ // "projectPrivate",
+ // "publicRead"
+ // ],
+ // "enumDescriptions": [
+ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ // "Object owner gets OWNER access, and project team owners get OWNER access.",
+ // "Object owner gets OWNER access, and project team owners get READER access.",
+ // "Object owner gets OWNER access.",
+ // "Object owner gets OWNER access, and project team members get access according to their roles.",
+ // "Object owner gets OWNER access, and allUsers get READER access."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationMatch": {
+ // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationMatch": {
+ // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifSourceGenerationMatch": {
+ // "description": "Makes the operation conditional on whether the source object's generation matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifSourceGenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifSourceMetagenerationMatch": {
+ // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifSourceMetagenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
+ // "enum": [
+ // "full",
+ // "noAcl"
+ // ],
+ // "enumDescriptions": [
+ // "Include all properties.",
+ // "Omit the owner, acl property."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "sourceBucket": {
+ // "description": "Name of the bucket in which to find the source object.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "sourceGeneration": {
+ // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "sourceObject": {
+ // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{sourceBucket}/o/{sourceObject}/copyTo/b/{destinationBucket}/o/{destinationObject}",
+ // "request": {
+ // "$ref": "Object"
+ // },
+ // "response": {
+ // "$ref": "Object"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ],
+ // "supportsMediaDownload": true,
+ // "useMediaDownloadService": true
+ // }
+
+}
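+
+// exampleCopy is an illustrative sketch, not part of the generated surface:
+// it copies an object across buckets while overriding its content type via
+// the request body, and uses IfGenerationMatch(0) so the copy fails if the
+// destination was created concurrently. Names are placeholders.
+func exampleCopy(svc *Service) (*Object, error) {
+	return svc.Objects.
+		Copy("src-bucket", "a.txt", "dst-bucket", "b.txt",
+			&Object{ContentType: "text/plain"}).
+		IfGenerationMatch(0).
+		Do()
+}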
+
+// method id "storage.objects.delete":
+
+type ObjectsDeleteCall struct {
+ s *Service
+ bucket string
+ object string
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Delete: Deletes an object and its metadata. Deletions are permanent
+// if versioning is not enabled for the bucket, or if the generation
+// parameter is used.
+func (r *ObjectsService) Delete(bucket string, object string) *ObjectsDeleteCall {
+ c := &ObjectsDeleteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.object = object
+ return c
+}
+
+// Generation sets the optional parameter "generation": If present,
+// permanently deletes a specific revision of this object (as opposed to
+// the latest version, the default).
+func (c *ObjectsDeleteCall) Generation(generation int64) *ObjectsDeleteCall {
+ c.urlParams_.Set("generation", fmt.Sprint(generation))
+ return c
+}
+
+// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
+// Makes the operation conditional on whether the object's current
+// generation matches the given value.
+func (c *ObjectsDeleteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsDeleteCall {
+ c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
+ return c
+}
+
+// IfGenerationNotMatch sets the optional parameter
+// "ifGenerationNotMatch": Makes the operation conditional on whether
+// the object's current generation does not match the given value.
+func (c *ObjectsDeleteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsDeleteCall {
+ c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
+ return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the operation conditional on whether
+// the object's current metageneration matches the given value.
+func (c *ObjectsDeleteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsDeleteCall {
+ c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+ return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the operation conditional on
+// whether the object's current metageneration does not match the given
+// value.
+func (c *ObjectsDeleteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsDeleteCall {
+ c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsDeleteCall) Fields(s ...googleapi.Field) *ObjectsDeleteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectsDeleteCall) Context(ctx context.Context) *ObjectsDeleteCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "object": c.object,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.objects.delete" call.
+func (c *ObjectsDeleteCall) Do(opts ...googleapi.CallOption) error {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Deletes an object and its metadata. Deletions are permanent if versioning is not enabled for the bucket, or if the generation parameter is used.",
+ // "httpMethod": "DELETE",
+ // "id": "storage.objects.delete",
+ // "parameterOrder": [
+ // "bucket",
+ // "object"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of the bucket in which the object resides.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "generation": {
+ // "description": "If present, permanently deletes a specific revision of this object (as opposed to the latest version, the default).",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationMatch": {
+ // "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationMatch": {
+ // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "object": {
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/o/{object}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ]
+ // }
+
+}
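+
+// exampleDeleteObject is an illustrative sketch, not part of the generated
+// surface: the generation precondition turns the delete into a
+// compare-and-delete that fails if the object was overwritten after gen was
+// read. gen and the names are placeholders.
+func exampleDeleteObject(svc *Service, gen int64) error {
+	return svc.Objects.Delete("my-bucket", "old.log").
+		IfGenerationMatch(gen).
+		Do()
+}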
+
+// method id "storage.objects.get":
+
+type ObjectsGetCall struct {
+ s *Service
+ bucket string
+ object string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// Get: Retrieves an object or its metadata.
+func (r *ObjectsService) Get(bucket string, object string) *ObjectsGetCall {
+ c := &ObjectsGetCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.object = object
+ return c
+}
+
+// Generation sets the optional parameter "generation": If present,
+// selects a specific revision of this object (as opposed to the latest
+// version, the default).
+func (c *ObjectsGetCall) Generation(generation int64) *ObjectsGetCall {
+ c.urlParams_.Set("generation", fmt.Sprint(generation))
+ return c
+}
+
+// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
+// Makes the operation conditional on whether the object's generation
+// matches the given value.
+func (c *ObjectsGetCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsGetCall {
+ c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
+ return c
+}
+
+// IfGenerationNotMatch sets the optional parameter
+// "ifGenerationNotMatch": Makes the operation conditional on whether
+// the object's generation does not match the given value.
+func (c *ObjectsGetCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsGetCall {
+ c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
+ return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the operation conditional on whether
+// the object's current metageneration matches the given value.
+func (c *ObjectsGetCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsGetCall {
+ c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+ return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the operation conditional on
+// whether the object's current metageneration does not match the given
+// value.
+func (c *ObjectsGetCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsGetCall {
+ c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+ return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl.
+//
+// Possible values:
+// "full" - Include all properties.
+// "noAcl" - Omit the owner, acl property.
+func (c *ObjectsGetCall) Projection(projection string) *ObjectsGetCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsGetCall) Fields(s ...googleapi.Field) *ObjectsGetCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectsGetCall) IfNoneMatch(entityTag string) *ObjectsGetCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do and Download
+// methods. Any pending HTTP request will be aborted if the provided
+// context is canceled.
+func (c *ObjectsGetCall) Context(ctx context.Context) *ObjectsGetCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "object": c.object,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Download fetches the API endpoint's "media" value, instead of the normal
+// API response value. If the returned error is nil, the Response is guaranteed to
+// have a 2xx status code. Callers must close the Response.Body as usual.
+func (c *ObjectsGetCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("media")
+ if err != nil {
+ return nil, err
+ }
+ if err := googleapi.CheckMediaResponse(res); err != nil {
+ res.Body.Close()
+ return nil, err
+ }
+ return res, nil
+}
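+
+// Usage sketch (illustrative only): downloading object contents rather
+// than metadata. Assumes a caller-provided svc (*Service); bucket and
+// object names are examples.
+//
+//	res, err := svc.Objects.Get("my-bucket", "notes.txt").Download()
+//	if err != nil {
+//		return err
+//	}
+//	defer res.Body.Close() // the caller must close the body
+//	data, err := ioutil.ReadAll(res.Body)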
+
+// Do executes the "storage.objects.get" call.
+// Exactly one of *Object or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Object.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ObjectsGetCall) Do(opts ...googleapi.CallOption) (*Object, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Object{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves an object or its metadata.",
+ // "httpMethod": "GET",
+ // "id": "storage.objects.get",
+ // "parameterOrder": [
+ // "bucket",
+ // "object"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of the bucket in which the object resides.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "generation": {
+ // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationMatch": {
+ // "description": "Makes the operation conditional on whether the object's generation matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the object's generation does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationMatch": {
+ // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "object": {
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. Defaults to noAcl.",
+ // "enum": [
+ // "full",
+ // "noAcl"
+ // ],
+ // "enumDescriptions": [
+ // "Include all properties.",
+ // "Omit the owner, acl property."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/o/{object}",
+ // "response": {
+ // "$ref": "Object"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_only",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ],
+ // "supportsMediaDownload": true,
+ // "useMediaDownloadService": true
+ // }
+
+}
+
+// method id "storage.objects.insert":
+
+type ObjectsInsertCall struct {
+ s *Service
+ bucket string
+ object *Object
+ urlParams_ gensupport.URLParams
+ media_ io.Reader
+ mediaBuffer_ *gensupport.MediaBuffer
+ mediaType_ string
+ mediaSize_ int64 // mediaSize, if known. Used only for calls to progressUpdater_.
+ progressUpdater_ googleapi.ProgressUpdater
+ ctx_ context.Context
+}
+
+// Insert: Stores a new object and metadata.
+func (r *ObjectsService) Insert(bucket string, object *Object) *ObjectsInsertCall {
+ c := &ObjectsInsertCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.object = object
+ return c
+}
+
+// ContentEncoding sets the optional parameter "contentEncoding": If
+// set, sets the contentEncoding property of the final object to this
+// value. Setting this parameter is equivalent to setting the
+// contentEncoding metadata property. This can be useful when uploading
+// an object with uploadType=media to indicate the encoding of the
+// content being uploaded.
+func (c *ObjectsInsertCall) ContentEncoding(contentEncoding string) *ObjectsInsertCall {
+ c.urlParams_.Set("contentEncoding", contentEncoding)
+ return c
+}
+
+// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
+// Makes the operation conditional on whether the object's current
+// generation matches the given value.
+func (c *ObjectsInsertCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsInsertCall {
+ c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
+ return c
+}
+
+// IfGenerationNotMatch sets the optional parameter
+// "ifGenerationNotMatch": Makes the operation conditional on whether
+// the object's current generation does not match the given value.
+func (c *ObjectsInsertCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsInsertCall {
+ c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
+ return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the operation conditional on whether
+// the object's current metageneration matches the given value.
+func (c *ObjectsInsertCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsInsertCall {
+ c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+ return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the operation conditional on
+// whether the object's current metageneration does not match the given
+// value.
+func (c *ObjectsInsertCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsInsertCall {
+ c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+ return c
+}
+
+// Name sets the optional parameter "name": Name of the object. Required
+// when the object metadata is not otherwise provided. Overrides the
+// object metadata's name value, if any. For information about how to
+// URL encode object names to be path safe, see Encoding URI Path Parts.
+func (c *ObjectsInsertCall) Name(name string) *ObjectsInsertCall {
+ c.urlParams_.Set("name", name)
+ return c
+}
+
+// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
+// predefined set of access controls to this object.
+//
+// Possible values:
+// "authenticatedRead" - Object owner gets OWNER access, and
+// allAuthenticatedUsers get READER access.
+// "bucketOwnerFullControl" - Object owner gets OWNER access, and
+// project team owners get OWNER access.
+// "bucketOwnerRead" - Object owner gets OWNER access, and project
+// team owners get READER access.
+// "private" - Object owner gets OWNER access.
+// "projectPrivate" - Object owner gets OWNER access, and project team
+// members get access according to their roles.
+// "publicRead" - Object owner gets OWNER access, and allUsers get
+// READER access.
+func (c *ObjectsInsertCall) PredefinedAcl(predefinedAcl string) *ObjectsInsertCall {
+ c.urlParams_.Set("predefinedAcl", predefinedAcl)
+ return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl, unless the object resource
+// specifies the acl property, when it defaults to full.
+//
+// Possible values:
+// "full" - Include all properties.
+// "noAcl" - Omit the owner, acl property.
+func (c *ObjectsInsertCall) Projection(projection string) *ObjectsInsertCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// Media specifies the media to upload in one or more chunks. The chunk
+// size may be controlled by supplying a MediaOption generated by
+// googleapi.ChunkSize. The chunk size defaults to
+// googleapi.DefaultUploadChunkSize. The Content-Type header used in the
+// upload request will be determined by sniffing the contents of r,
+// unless a MediaOption generated by googleapi.ContentType is
+// supplied.
+// At most one of Media and ResumableMedia may be set.
+func (c *ObjectsInsertCall) Media(r io.Reader, options ...googleapi.MediaOption) *ObjectsInsertCall {
+ opts := googleapi.ProcessMediaOptions(options)
+ chunkSize := opts.ChunkSize
+ if !opts.ForceEmptyContentType {
+ r, c.mediaType_ = gensupport.DetermineContentType(r, opts.ContentType)
+ }
+ c.media_, c.mediaBuffer_ = gensupport.PrepareUpload(r, chunkSize)
+ return c
+}
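+
+// Usage sketch (illustrative only): a one-shot upload via Media. The
+// Content-Type is sniffed from f unless a googleapi.ContentType option is
+// supplied. Assumes a caller-provided svc (*Service) and an open file f.
+//
+//	obj, err := svc.Objects.Insert("my-bucket", &Object{Name: "notes.txt"}).
+//		Media(f, googleapi.ContentType("text/plain")).
+//		Do()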
+
+// ResumableMedia specifies the media to upload in chunks and can be
+// canceled with ctx.
+//
+// Deprecated: use Media instead.
+//
+// At most one of Media and ResumableMedia may be set. mediaType
+// identifies the MIME media type of the upload, such as "image/png". If
+// mediaType is "", it will be auto-detected. The provided ctx will
+// supersede any context previously provided to the Context method.
+func (c *ObjectsInsertCall) ResumableMedia(ctx context.Context, r io.ReaderAt, size int64, mediaType string) *ObjectsInsertCall {
+ c.ctx_ = ctx
+ rdr := gensupport.ReaderAtToReader(r, size)
+ rdr, c.mediaType_ = gensupport.DetermineContentType(rdr, mediaType)
+ c.mediaBuffer_ = gensupport.NewMediaBuffer(rdr, googleapi.DefaultUploadChunkSize)
+ c.media_ = nil
+ c.mediaSize_ = size
+ return c
+}
+
+// ProgressUpdater provides a callback function that will be called
+// after every chunk. It should be a low-latency function in order to
+// not slow down the upload operation. This should only be called when
+// using ResumableMedia (as opposed to Media).
+func (c *ObjectsInsertCall) ProgressUpdater(pu googleapi.ProgressUpdater) *ObjectsInsertCall {
+ c.progressUpdater_ = pu
+ return c
+}
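+
+// Usage sketch (illustrative only): a resumable upload with progress
+// reporting. Assumes a caller-provided svc (*Service), ctx
+// (context.Context), and f (io.ReaderAt) whose size is known.
+//
+//	obj, err := svc.Objects.Insert("my-bucket", &Object{Name: "big.bin"}).
+//		ResumableMedia(ctx, f, size, "application/octet-stream").
+//		ProgressUpdater(func(current, total int64) {
+//			log.Printf("uploaded %d of %d bytes", current, total)
+//		}).
+//		Do()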
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsInsertCall) Fields(s ...googleapi.Field) *ObjectsInsertCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+// This context will supersede any context previously provided to the
+// ResumableMedia method.
+func (c *ObjectsInsertCall) Context(ctx context.Context) *ObjectsInsertCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
+ if c.media_ != nil || c.mediaBuffer_ != nil {
+ urls = strings.Replace(urls, "https://www.googleapis.com/", "https://www.googleapis.com/upload/", 1)
+ protocol := "multipart"
+ if c.mediaBuffer_ != nil {
+ protocol = "resumable"
+ }
+ c.urlParams_.Set("uploadType", protocol)
+ }
+ if body == nil {
+ body = new(bytes.Buffer)
+ reqHeaders.Set("Content-Type", "application/json")
+ }
+ if c.media_ != nil {
+ combined, ctype := gensupport.CombineBodyMedia(body, "application/json", c.media_, c.mediaType_)
+ defer combined.Close()
+ reqHeaders.Set("Content-Type", ctype)
+ body = combined
+ }
+ if c.mediaBuffer_ != nil && c.mediaType_ != "" {
+ reqHeaders.Set("X-Upload-Content-Type", c.mediaType_)
+ }
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.objects.insert" call.
+// Exactly one of *Object or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Object.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ObjectsInsertCall) Do(opts ...googleapi.CallOption) (*Object, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ if c.mediaBuffer_ != nil {
+ loc := res.Header.Get("Location")
+ rx := &gensupport.ResumableUpload{
+ Client: c.s.client,
+ UserAgent: c.s.userAgent(),
+ URI: loc,
+ Media: c.mediaBuffer_,
+ MediaType: c.mediaType_,
+ Callback: func(curr int64) {
+ if c.progressUpdater_ != nil {
+ c.progressUpdater_(curr, c.mediaSize_)
+ }
+ },
+ }
+ ctx := c.ctx_
+ if ctx == nil {
+ ctx = context.TODO()
+ }
+ res, err = rx.Upload(ctx)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ }
+ ret := &Object{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Stores a new object and metadata.",
+ // "httpMethod": "POST",
+ // "id": "storage.objects.insert",
+ // "mediaUpload": {
+ // "accept": [
+ // "*/*"
+ // ],
+ // "protocols": {
+ // "resumable": {
+ // "multipart": true,
+ // "path": "/resumable/upload/storage/v1/b/{bucket}/o"
+ // },
+ // "simple": {
+ // "multipart": true,
+ // "path": "/upload/storage/v1/b/{bucket}/o"
+ // }
+ // }
+ // },
+ // "parameterOrder": [
+ // "bucket"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "contentEncoding": {
+ // "description": "If set, sets the contentEncoding property of the final object to this value. Setting this parameter is equivalent to setting the contentEncoding metadata property. This can be useful when uploading an object with uploadType=media to indicate the encoding of the content being uploaded.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationMatch": {
+ // "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationMatch": {
+ // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "name": {
+ // "description": "Name of the object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "predefinedAcl": {
+ // "description": "Apply a predefined set of access controls to this object.",
+ // "enum": [
+ // "authenticatedRead",
+ // "bucketOwnerFullControl",
+ // "bucketOwnerRead",
+ // "private",
+ // "projectPrivate",
+ // "publicRead"
+ // ],
+ // "enumDescriptions": [
+ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ // "Object owner gets OWNER access, and project team owners get OWNER access.",
+ // "Object owner gets OWNER access, and project team owners get READER access.",
+ // "Object owner gets OWNER access.",
+ // "Object owner gets OWNER access, and project team members get access according to their roles.",
+ // "Object owner gets OWNER access, and allUsers get READER access."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
+ // "enum": [
+ // "full",
+ // "noAcl"
+ // ],
+ // "enumDescriptions": [
+ // "Include all properties.",
+ // "Omit the owner, acl property."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/o",
+ // "request": {
+ // "$ref": "Object"
+ // },
+ // "response": {
+ // "$ref": "Object"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ],
+ // "supportsMediaDownload": true,
+ // "supportsMediaUpload": true,
+ // "useMediaDownloadService": true
+ // }
+
+}
+
+// method id "storage.objects.list":
+
+type ObjectsListCall struct {
+ s *Service
+ bucket string
+ urlParams_ gensupport.URLParams
+ ifNoneMatch_ string
+ ctx_ context.Context
+}
+
+// List: Retrieves a list of objects matching the criteria.
+func (r *ObjectsService) List(bucket string) *ObjectsListCall {
+ c := &ObjectsListCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ return c
+}
+
+// Delimiter sets the optional parameter "delimiter": Returns results in
+// a directory-like mode. items will contain only objects whose names,
+// aside from the prefix, do not contain delimiter. Objects whose names,
+// aside from the prefix, contain delimiter will have their name,
+// truncated after the delimiter, returned in prefixes. Duplicate
+// prefixes are omitted.
+func (c *ObjectsListCall) Delimiter(delimiter string) *ObjectsListCall {
+ c.urlParams_.Set("delimiter", delimiter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum number
+// of items plus prefixes to return. As duplicate prefixes are omitted,
+// fewer total results may be returned than requested. The default value
+// of this parameter is 1,000 items.
+func (c *ObjectsListCall) MaxResults(maxResults int64) *ObjectsListCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": A
+// previously-returned page token representing part of the larger set of
+// results to view.
+func (c *ObjectsListCall) PageToken(pageToken string) *ObjectsListCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Prefix sets the optional parameter "prefix": Filter results to
+// objects whose names begin with this prefix.
+func (c *ObjectsListCall) Prefix(prefix string) *ObjectsListCall {
+ c.urlParams_.Set("prefix", prefix)
+ return c
+}
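+
+// Usage sketch (illustrative only): a directory-like listing. Combining
+// Prefix and Delimiter returns objects directly under "photos/" in Items
+// and the next level of "subdirectories" in Prefixes. Assumes a
+// caller-provided svc (*Service).
+//
+//	objs, err := svc.Objects.List("my-bucket").
+//		Prefix("photos/").
+//		Delimiter("/").
+//		Do()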
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl.
+//
+// Possible values:
+// "full" - Include all properties.
+// "noAcl" - Omit the owner, acl property.
+func (c *ObjectsListCall) Projection(projection string) *ObjectsListCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// Versions sets the optional parameter "versions": If true, lists all
+// versions of an object as distinct results. The default is false. For
+// more information, see Object Versioning.
+func (c *ObjectsListCall) Versions(versions bool) *ObjectsListCall {
+ c.urlParams_.Set("versions", fmt.Sprint(versions))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsListCall) Fields(s ...googleapi.Field) *ObjectsListCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// IfNoneMatch sets the optional parameter which makes the operation
+// fail if the object's ETag matches the given value. This is useful for
+// getting updates only after the object has changed since the last
+// request. Use googleapi.IsNotModified to check whether the response
+// error from Do is the result of If-None-Match.
+func (c *ObjectsListCall) IfNoneMatch(entityTag string) *ObjectsListCall {
+ c.ifNoneMatch_ = entityTag
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectsListCall) Context(ctx context.Context) *ObjectsListCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ if c.ifNoneMatch_ != "" {
+ reqHeaders.Set("If-None-Match", c.ifNoneMatch_)
+ }
+ var body io.Reader = nil
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.objects.list" call.
+// Exactly one of *Objects or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Objects.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ObjectsListCall) Do(opts ...googleapi.CallOption) (*Objects, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Objects{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves a list of objects matching the criteria.",
+ // "httpMethod": "GET",
+ // "id": "storage.objects.list",
+ // "parameterOrder": [
+ // "bucket"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of the bucket in which to look for objects.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "delimiter": {
+ // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.",
+ // "format": "uint32",
+ // "location": "query",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "A previously-returned page token representing part of the larger set of results to view.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "prefix": {
+ // "description": "Filter results to objects whose names begin with this prefix.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. Defaults to noAcl.",
+ // "enum": [
+ // "full",
+ // "noAcl"
+ // ],
+ // "enumDescriptions": [
+ // "Include all properties.",
+ // "Omit the owner, acl property."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "versions": {
+ // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
+ // "location": "query",
+ // "type": "boolean"
+ // }
+ // },
+ // "path": "b/{bucket}/o",
+ // "response": {
+ // "$ref": "Objects"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_only",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ],
+ // "supportsSubscription": true
+ // }
+
+}
+
+// Pages invokes f for each page of results.
+// A non-nil error returned from f will halt the iteration.
+// The provided context supersedes any context provided to the Context method.
+func (c *ObjectsListCall) Pages(ctx context.Context, f func(*Objects) error) error {
+ c.ctx_ = ctx
+ defer c.PageToken(c.urlParams_.Get("pageToken")) // reset paging to original point
+ for {
+ x, err := c.Do()
+ if err != nil {
+ return err
+ }
+ if err := f(x); err != nil {
+ return err
+ }
+ if x.NextPageToken == "" {
+ return nil
+ }
+ c.PageToken(x.NextPageToken)
+ }
+}
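+
+// Usage sketch (illustrative only): walking every page of results.
+// Assumes a caller-provided svc (*Service) and ctx (context.Context).
+//
+//	err := svc.Objects.List("my-bucket").Pages(ctx, func(page *Objects) error {
+//		for _, obj := range page.Items {
+//			fmt.Println(obj.Name)
+//		}
+//		return nil // a non-nil error here stops the iteration
+//	})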
+
+// method id "storage.objects.patch":
+
+type ObjectsPatchCall struct {
+ s *Service
+ bucket string
+ object string
+ object2 *Object
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Patch: Updates an object's metadata. This method supports patch
+// semantics.
+func (r *ObjectsService) Patch(bucket string, object string, object2 *Object) *ObjectsPatchCall {
+ c := &ObjectsPatchCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.object = object
+ c.object2 = object2
+ return c
+}
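+
+// Usage sketch (illustrative only): with patch semantics, only the fields
+// set in the supplied Object are changed; everything else is left intact.
+// Assumes a caller-provided svc (*Service).
+//
+//	patched, err := svc.Objects.Patch("my-bucket", "notes.txt",
+//		&Object{ContentLanguage: "en"}).Do()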
+
+// Generation sets the optional parameter "generation": If present,
+// selects a specific revision of this object (as opposed to the latest
+// version, the default).
+func (c *ObjectsPatchCall) Generation(generation int64) *ObjectsPatchCall {
+ c.urlParams_.Set("generation", fmt.Sprint(generation))
+ return c
+}
+
+// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
+// Makes the operation conditional on whether the object's current
+// generation matches the given value.
+func (c *ObjectsPatchCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsPatchCall {
+ c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
+ return c
+}
+
+// IfGenerationNotMatch sets the optional parameter
+// "ifGenerationNotMatch": Makes the operation conditional on whether
+// the object's current generation does not match the given value.
+func (c *ObjectsPatchCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsPatchCall {
+ c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
+ return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the operation conditional on whether
+// the object's current metageneration matches the given value.
+func (c *ObjectsPatchCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsPatchCall {
+ c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+ return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the operation conditional on
+// whether the object's current metageneration does not match the given
+// value.
+func (c *ObjectsPatchCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsPatchCall {
+ c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+ return c
+}
+
+// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
+// predefined set of access controls to this object.
+//
+// Possible values:
+// "authenticatedRead" - Object owner gets OWNER access, and
+// allAuthenticatedUsers get READER access.
+// "bucketOwnerFullControl" - Object owner gets OWNER access, and
+// project team owners get OWNER access.
+// "bucketOwnerRead" - Object owner gets OWNER access, and project
+// team owners get READER access.
+// "private" - Object owner gets OWNER access.
+// "projectPrivate" - Object owner gets OWNER access, and project team
+// members get access according to their roles.
+// "publicRead" - Object owner gets OWNER access, and allUsers get
+// READER access.
+func (c *ObjectsPatchCall) PredefinedAcl(predefinedAcl string) *ObjectsPatchCall {
+ c.urlParams_.Set("predefinedAcl", predefinedAcl)
+ return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to full.
+//
+// Possible values:
+// "full" - Include all properties.
+// "noAcl" - Omit the owner, acl property.
+func (c *ObjectsPatchCall) Projection(projection string) *ObjectsPatchCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsPatchCall) Fields(s ...googleapi.Field) *ObjectsPatchCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectsPatchCall) Context(ctx context.Context) *ObjectsPatchCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("PATCH", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "object": c.object,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.objects.patch" call.
+// Exactly one of *Object or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Object.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ObjectsPatchCall) Do(opts ...googleapi.CallOption) (*Object, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Object{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates an object's metadata. This method supports patch semantics.",
+ // "httpMethod": "PATCH",
+ // "id": "storage.objects.patch",
+ // "parameterOrder": [
+ // "bucket",
+ // "object"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of the bucket in which the object resides.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "generation": {
+ // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationMatch": {
+ // "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationMatch": {
+ // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "object": {
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "predefinedAcl": {
+ // "description": "Apply a predefined set of access controls to this object.",
+ // "enum": [
+ // "authenticatedRead",
+ // "bucketOwnerFullControl",
+ // "bucketOwnerRead",
+ // "private",
+ // "projectPrivate",
+ // "publicRead"
+ // ],
+ // "enumDescriptions": [
+ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ // "Object owner gets OWNER access, and project team owners get OWNER access.",
+ // "Object owner gets OWNER access, and project team owners get READER access.",
+ // "Object owner gets OWNER access.",
+ // "Object owner gets OWNER access, and project team members get access according to their roles.",
+ // "Object owner gets OWNER access, and allUsers get READER access."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. Defaults to full.",
+ // "enum": [
+ // "full",
+ // "noAcl"
+ // ],
+ // "enumDescriptions": [
+ // "Include all properties.",
+ // "Omit the owner, acl property."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/o/{object}",
+ // "request": {
+ // "$ref": "Object"
+ // },
+ // "response": {
+ // "$ref": "Object"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ]
+ // }
+
+}
+
+// method id "storage.objects.rewrite":
+
+type ObjectsRewriteCall struct {
+ s *Service
+ sourceBucket string
+ sourceObject string
+ destinationBucket string
+ destinationObject string
+ object *Object
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Rewrite: Rewrites a source object to a destination object. Optionally
+// overrides metadata.
+func (r *ObjectsService) Rewrite(sourceBucket string, sourceObject string, destinationBucket string, destinationObject string, object *Object) *ObjectsRewriteCall {
+ c := &ObjectsRewriteCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.sourceBucket = sourceBucket
+ c.sourceObject = sourceObject
+ c.destinationBucket = destinationBucket
+ c.destinationObject = destinationObject
+ c.object = object
+ return c
+}
+
+// DestinationPredefinedAcl sets the optional parameter
+// "destinationPredefinedAcl": Apply a predefined set of access controls
+// to the destination object.
+//
+// Possible values:
+// "authenticatedRead" - Object owner gets OWNER access, and
+// allAuthenticatedUsers get READER access.
+// "bucketOwnerFullControl" - Object owner gets OWNER access, and
+// project team owners get OWNER access.
+// "bucketOwnerRead" - Object owner gets OWNER access, and project
+// team owners get READER access.
+// "private" - Object owner gets OWNER access.
+// "projectPrivate" - Object owner gets OWNER access, and project team
+// members get access according to their roles.
+// "publicRead" - Object owner gets OWNER access, and allUsers get
+// READER access.
+func (c *ObjectsRewriteCall) DestinationPredefinedAcl(destinationPredefinedAcl string) *ObjectsRewriteCall {
+ c.urlParams_.Set("destinationPredefinedAcl", destinationPredefinedAcl)
+ return c
+}
+
+// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
+// Makes the operation conditional on whether the destination object's
+// current generation matches the given value.
+func (c *ObjectsRewriteCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsRewriteCall {
+ c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
+ return c
+}
+
+// IfGenerationNotMatch sets the optional parameter
+// "ifGenerationNotMatch": Makes the operation conditional on whether
+// the destination object's current generation does not match the given
+// value.
+func (c *ObjectsRewriteCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsRewriteCall {
+ c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
+ return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the operation conditional on whether
+// the destination object's current metageneration matches the given
+// value.
+func (c *ObjectsRewriteCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsRewriteCall {
+ c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+ return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the operation conditional on
+// whether the destination object's current metageneration does not
+// match the given value.
+func (c *ObjectsRewriteCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsRewriteCall {
+ c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+ return c
+}
+
+// IfSourceGenerationMatch sets the optional parameter
+// "ifSourceGenerationMatch": Makes the operation conditional on whether
+// the source object's generation matches the given value.
+func (c *ObjectsRewriteCall) IfSourceGenerationMatch(ifSourceGenerationMatch int64) *ObjectsRewriteCall {
+ c.urlParams_.Set("ifSourceGenerationMatch", fmt.Sprint(ifSourceGenerationMatch))
+ return c
+}
+
+// IfSourceGenerationNotMatch sets the optional parameter
+// "ifSourceGenerationNotMatch": Makes the operation conditional on
+// whether the source object's generation does not match the given
+// value.
+func (c *ObjectsRewriteCall) IfSourceGenerationNotMatch(ifSourceGenerationNotMatch int64) *ObjectsRewriteCall {
+ c.urlParams_.Set("ifSourceGenerationNotMatch", fmt.Sprint(ifSourceGenerationNotMatch))
+ return c
+}
+
+// IfSourceMetagenerationMatch sets the optional parameter
+// "ifSourceMetagenerationMatch": Makes the operation conditional on
+// whether the source object's current metageneration matches the given
+// value.
+func (c *ObjectsRewriteCall) IfSourceMetagenerationMatch(ifSourceMetagenerationMatch int64) *ObjectsRewriteCall {
+ c.urlParams_.Set("ifSourceMetagenerationMatch", fmt.Sprint(ifSourceMetagenerationMatch))
+ return c
+}
+
+// IfSourceMetagenerationNotMatch sets the optional parameter
+// "ifSourceMetagenerationNotMatch": Makes the operation conditional on
+// whether the source object's current metageneration does not match the
+// given value.
+func (c *ObjectsRewriteCall) IfSourceMetagenerationNotMatch(ifSourceMetagenerationNotMatch int64) *ObjectsRewriteCall {
+ c.urlParams_.Set("ifSourceMetagenerationNotMatch", fmt.Sprint(ifSourceMetagenerationNotMatch))
+ return c
+}
+
+// MaxBytesRewrittenPerCall sets the optional parameter
+// "maxBytesRewrittenPerCall": The maximum number of bytes that will be
+// rewritten per rewrite request. Most callers shouldn't need to specify
+// this parameter - it is primarily in place to support testing. If
+// specified the value must be an integral multiple of 1 MiB (1048576).
+// Also, this only applies to requests where the source and destination
+// span locations and/or storage classes. Finally, this value must not
+// change across rewrite calls, else you'll get an error that the
+// rewriteToken is invalid.
+func (c *ObjectsRewriteCall) MaxBytesRewrittenPerCall(maxBytesRewrittenPerCall int64) *ObjectsRewriteCall {
+ c.urlParams_.Set("maxBytesRewrittenPerCall", fmt.Sprint(maxBytesRewrittenPerCall))
+ return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl, unless the object resource
+// specifies the acl property, when it defaults to full.
+//
+// Possible values:
+// "full" - Include all properties.
+// "noAcl" - Omit the owner, acl property.
+func (c *ObjectsRewriteCall) Projection(projection string) *ObjectsRewriteCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// RewriteToken sets the optional parameter "rewriteToken": Include this
+// field (from the previous rewrite response) on each rewrite request
+// after the first one, until the rewrite response 'done' flag is true.
+// Calls that provide a rewriteToken can omit all other request fields,
+// but if included those fields must match the values provided in the
+// first rewrite request.
+func (c *ObjectsRewriteCall) RewriteToken(rewriteToken string) *ObjectsRewriteCall {
+ c.urlParams_.Set("rewriteToken", rewriteToken)
+ return c
+}
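+
+// Usage sketch (illustrative only): driving a rewrite to completion. A
+// large or cross-location rewrite may span several calls; feed each
+// response's RewriteToken into the next call until Done is true. Assumes
+// a caller-provided svc (*Service).
+//
+//	call := svc.Objects.Rewrite("src-bucket", "src.obj",
+//		"dst-bucket", "dst.obj", &Object{})
+//	for {
+//		res, err := call.Do()
+//		if err != nil {
+//			return err
+//		}
+//		if res.Done {
+//			break
+//		}
+//		call.RewriteToken(res.RewriteToken)
+//	}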
+
+// SourceGeneration sets the optional parameter "sourceGeneration": If
+// present, selects a specific revision of the source object (as opposed
+// to the latest version, the default).
+func (c *ObjectsRewriteCall) SourceGeneration(sourceGeneration int64) *ObjectsRewriteCall {
+ c.urlParams_.Set("sourceGeneration", fmt.Sprint(sourceGeneration))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsRewriteCall) Fields(s ...googleapi.Field) *ObjectsRewriteCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectsRewriteCall) Context(ctx context.Context) *ObjectsRewriteCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.object)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "sourceBucket": c.sourceBucket,
+ "sourceObject": c.sourceObject,
+ "destinationBucket": c.destinationBucket,
+ "destinationObject": c.destinationObject,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.objects.rewrite" call.
+// Exactly one of *RewriteResponse or error will be non-nil. Any non-2xx
+// status code is an error. Response headers are in either
+// *RewriteResponse.ServerResponse.Header or (if a response was returned
+// at all) in error.(*googleapi.Error).Header. Use
+// googleapi.IsNotModified to check whether the returned error was
+// because http.StatusNotModified was returned.
+func (c *ObjectsRewriteCall) Do(opts ...googleapi.CallOption) (*RewriteResponse, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &RewriteResponse{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Rewrites a source object to a destination object. Optionally overrides metadata.",
+ // "httpMethod": "POST",
+ // "id": "storage.objects.rewrite",
+ // "parameterOrder": [
+ // "sourceBucket",
+ // "sourceObject",
+ // "destinationBucket",
+ // "destinationObject"
+ // ],
+ // "parameters": {
+ // "destinationBucket": {
+ // "description": "Name of the bucket in which to store the new object. Overrides the provided object metadata's bucket value, if any.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "destinationObject": {
+ // "description": "Name of the new object. Required when the object metadata is not otherwise provided. Overrides the object metadata's name value, if any. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "destinationPredefinedAcl": {
+ // "description": "Apply a predefined set of access controls to the destination object.",
+ // "enum": [
+ // "authenticatedRead",
+ // "bucketOwnerFullControl",
+ // "bucketOwnerRead",
+ // "private",
+ // "projectPrivate",
+ // "publicRead"
+ // ],
+ // "enumDescriptions": [
+ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ // "Object owner gets OWNER access, and project team owners get OWNER access.",
+ // "Object owner gets OWNER access, and project team owners get READER access.",
+ // "Object owner gets OWNER access.",
+ // "Object owner gets OWNER access, and project team members get access according to their roles.",
+ // "Object owner gets OWNER access, and allUsers get READER access."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationMatch": {
+ // "description": "Makes the operation conditional on whether the destination object's current generation matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the destination object's current generation does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationMatch": {
+ // "description": "Makes the operation conditional on whether the destination object's current metageneration matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the destination object's current metageneration does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifSourceGenerationMatch": {
+ // "description": "Makes the operation conditional on whether the source object's generation matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifSourceGenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the source object's generation does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifSourceMetagenerationMatch": {
+ // "description": "Makes the operation conditional on whether the source object's current metageneration matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifSourceMetagenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the source object's current metageneration does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxBytesRewrittenPerCall": {
+ // "description": "The maximum number of bytes that will be rewritten per rewrite request. Most callers shouldn't need to specify this parameter - it is primarily in place to support testing. If specified the value must be an integral multiple of 1 MiB (1048576). Also, this only applies to requests where the source and destination span locations and/or storage classes. Finally, this value must not change across rewrite calls else you'll get an error that the rewriteToken is invalid.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. Defaults to noAcl, unless the object resource specifies the acl property, when it defaults to full.",
+ // "enum": [
+ // "full",
+ // "noAcl"
+ // ],
+ // "enumDescriptions": [
+ // "Include all properties.",
+ // "Omit the owner, acl property."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "rewriteToken": {
+ // "description": "Include this field (from the previous rewrite response) on each rewrite request after the first one, until the rewrite response 'done' flag is true. Calls that provide a rewriteToken can omit all other request fields, but if included those fields must match the values provided in the first rewrite request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "sourceBucket": {
+ // "description": "Name of the bucket in which to find the source object.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "sourceGeneration": {
+ // "description": "If present, selects a specific revision of the source object (as opposed to the latest version, the default).",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "sourceObject": {
+ // "description": "Name of the source object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{sourceBucket}/o/{sourceObject}/rewriteTo/b/{destinationBucket}/o/{destinationObject}",
+ // "request": {
+ // "$ref": "Object"
+ // },
+ // "response": {
+ // "$ref": "RewriteResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ]
+ // }
+
+}
+
+// method id "storage.objects.update":
+
+type ObjectsUpdateCall struct {
+ s *Service
+ bucket string
+ object string
+ object2 *Object
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// Update: Updates an object's metadata.
+func (r *ObjectsService) Update(bucket string, object string, object2 *Object) *ObjectsUpdateCall {
+ c := &ObjectsUpdateCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.object = object
+ c.object2 = object2
+ return c
+}
+
+// Generation sets the optional parameter "generation": If present,
+// selects a specific revision of this object (as opposed to the latest
+// version, the default).
+func (c *ObjectsUpdateCall) Generation(generation int64) *ObjectsUpdateCall {
+ c.urlParams_.Set("generation", fmt.Sprint(generation))
+ return c
+}
+
+// IfGenerationMatch sets the optional parameter "ifGenerationMatch":
+// Makes the operation conditional on whether the object's current
+// generation matches the given value.
+func (c *ObjectsUpdateCall) IfGenerationMatch(ifGenerationMatch int64) *ObjectsUpdateCall {
+ c.urlParams_.Set("ifGenerationMatch", fmt.Sprint(ifGenerationMatch))
+ return c
+}
+
+// IfGenerationNotMatch sets the optional parameter
+// "ifGenerationNotMatch": Makes the operation conditional on whether
+// the object's current generation does not match the given value.
+func (c *ObjectsUpdateCall) IfGenerationNotMatch(ifGenerationNotMatch int64) *ObjectsUpdateCall {
+ c.urlParams_.Set("ifGenerationNotMatch", fmt.Sprint(ifGenerationNotMatch))
+ return c
+}
+
+// IfMetagenerationMatch sets the optional parameter
+// "ifMetagenerationMatch": Makes the operation conditional on whether
+// the object's current metageneration matches the given value.
+func (c *ObjectsUpdateCall) IfMetagenerationMatch(ifMetagenerationMatch int64) *ObjectsUpdateCall {
+ c.urlParams_.Set("ifMetagenerationMatch", fmt.Sprint(ifMetagenerationMatch))
+ return c
+}
+
+// IfMetagenerationNotMatch sets the optional parameter
+// "ifMetagenerationNotMatch": Makes the operation conditional on
+// whether the object's current metageneration does not match the given
+// value.
+func (c *ObjectsUpdateCall) IfMetagenerationNotMatch(ifMetagenerationNotMatch int64) *ObjectsUpdateCall {
+ c.urlParams_.Set("ifMetagenerationNotMatch", fmt.Sprint(ifMetagenerationNotMatch))
+ return c
+}
+
+// PredefinedAcl sets the optional parameter "predefinedAcl": Apply a
+// predefined set of access controls to this object.
+//
+// Possible values:
+// "authenticatedRead" - Object owner gets OWNER access, and
+// allAuthenticatedUsers get READER access.
+// "bucketOwnerFullControl" - Object owner gets OWNER access, and
+// project team owners get OWNER access.
+// "bucketOwnerRead" - Object owner gets OWNER access, and project
+// team owners get READER access.
+// "private" - Object owner gets OWNER access.
+// "projectPrivate" - Object owner gets OWNER access, and project team
+// members get access according to their roles.
+// "publicRead" - Object owner gets OWNER access, and allUsers get
+// READER access.
+func (c *ObjectsUpdateCall) PredefinedAcl(predefinedAcl string) *ObjectsUpdateCall {
+ c.urlParams_.Set("predefinedAcl", predefinedAcl)
+ return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to full.
+//
+// Possible values:
+// "full" - Include all properties.
+// "noAcl" - Omit the owner, acl property.
+func (c *ObjectsUpdateCall) Projection(projection string) *ObjectsUpdateCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsUpdateCall) Fields(s ...googleapi.Field) *ObjectsUpdateCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do and Download
+// methods. Any pending HTTP request will be aborted if the provided
+// context is canceled.
+func (c *ObjectsUpdateCall) Context(ctx context.Context) *ObjectsUpdateCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.object2)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/{object}")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("PUT", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ "object": c.object,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Download fetches the API endpoint's "media" value, instead of the normal
+// API response value. If the returned error is nil, the Response is guaranteed to
+// have a 2xx status code. Callers must close the Response.Body as usual.
+func (c *ObjectsUpdateCall) Download(opts ...googleapi.CallOption) (*http.Response, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("media")
+ if err != nil {
+ return nil, err
+ }
+ if err := googleapi.CheckMediaResponse(res); err != nil {
+ res.Body.Close()
+ return nil, err
+ }
+ return res, nil
+}
+
+// Do executes the "storage.objects.update" call.
+// Exactly one of *Object or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Object.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ObjectsUpdateCall) Do(opts ...googleapi.CallOption) (*Object, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Object{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates an object's metadata.",
+ // "httpMethod": "PUT",
+ // "id": "storage.objects.update",
+ // "parameterOrder": [
+ // "bucket",
+ // "object"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of the bucket in which the object resides.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "generation": {
+ // "description": "If present, selects a specific revision of this object (as opposed to the latest version, the default).",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationMatch": {
+ // "description": "Makes the operation conditional on whether the object's current generation matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifGenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the object's current generation does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationMatch": {
+ // "description": "Makes the operation conditional on whether the object's current metageneration matches the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "ifMetagenerationNotMatch": {
+ // "description": "Makes the operation conditional on whether the object's current metageneration does not match the given value.",
+ // "format": "int64",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "object": {
+ // "description": "Name of the object. For information about how to URL encode object names to be path safe, see Encoding URI Path Parts.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "predefinedAcl": {
+ // "description": "Apply a predefined set of access controls to this object.",
+ // "enum": [
+ // "authenticatedRead",
+ // "bucketOwnerFullControl",
+ // "bucketOwnerRead",
+ // "private",
+ // "projectPrivate",
+ // "publicRead"
+ // ],
+ // "enumDescriptions": [
+ // "Object owner gets OWNER access, and allAuthenticatedUsers get READER access.",
+ // "Object owner gets OWNER access, and project team owners get OWNER access.",
+ // "Object owner gets OWNER access, and project team owners get READER access.",
+ // "Object owner gets OWNER access.",
+ // "Object owner gets OWNER access, and project team members get access according to their roles.",
+ // "Object owner gets OWNER access, and allUsers get READER access."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. Defaults to full.",
+ // "enum": [
+ // "full",
+ // "noAcl"
+ // ],
+ // "enumDescriptions": [
+ // "Include all properties.",
+ // "Omit the owner, acl property."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // }
+ // },
+ // "path": "b/{bucket}/o/{object}",
+ // "request": {
+ // "$ref": "Object"
+ // },
+ // "response": {
+ // "$ref": "Object"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/devstorage.full_control"
+ // ],
+ // "supportsMediaDownload": true,
+ // "useMediaDownloadService": true
+ // }
+
+}
+
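+// A minimal usage sketch (assuming the package is imported as "storage" and
+// svc is a *storage.Service; the bucket, object name, and metageneration
+// value are placeholders):
+//
+//	obj, err := svc.Objects.Update("my-bucket", "notes.txt", &storage.Object{
+//		ContentType: "text/plain",
+//	}).IfMetagenerationMatch(3).Do()
+//	if err != nil {
+//		// Non-2xx responses arrive as a *googleapi.Error.
+//	}
+//	_ = obj
+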
+// method id "storage.objects.watchAll":
+
+type ObjectsWatchAllCall struct {
+ s *Service
+ bucket string
+ channel *Channel
+ urlParams_ gensupport.URLParams
+ ctx_ context.Context
+}
+
+// WatchAll: Watch for changes on all objects in a bucket.
+func (r *ObjectsService) WatchAll(bucket string, channel *Channel) *ObjectsWatchAllCall {
+ c := &ObjectsWatchAllCall{s: r.s, urlParams_: make(gensupport.URLParams)}
+ c.bucket = bucket
+ c.channel = channel
+ return c
+}
+
+// Delimiter sets the optional parameter "delimiter": Returns results in
+// a directory-like mode. items will contain only objects whose names,
+// aside from the prefix, do not contain delimiter. Objects whose names,
+// aside from the prefix, contain delimiter will have their name,
+// truncated after the delimiter, returned in prefixes. Duplicate
+// prefixes are omitted.
+func (c *ObjectsWatchAllCall) Delimiter(delimiter string) *ObjectsWatchAllCall {
+ c.urlParams_.Set("delimiter", delimiter)
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum number
+// of items plus prefixes to return. As duplicate prefixes are omitted,
+// fewer total results may be returned than requested. The default value
+// of this parameter is 1,000 items.
+func (c *ObjectsWatchAllCall) MaxResults(maxResults int64) *ObjectsWatchAllCall {
+ c.urlParams_.Set("maxResults", fmt.Sprint(maxResults))
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": A
+// previously-returned page token representing part of the larger set of
+// results to view.
+func (c *ObjectsWatchAllCall) PageToken(pageToken string) *ObjectsWatchAllCall {
+ c.urlParams_.Set("pageToken", pageToken)
+ return c
+}
+
+// Prefix sets the optional parameter "prefix": Filter results to
+// objects whose names begin with this prefix.
+func (c *ObjectsWatchAllCall) Prefix(prefix string) *ObjectsWatchAllCall {
+ c.urlParams_.Set("prefix", prefix)
+ return c
+}
+
+// Projection sets the optional parameter "projection": Set of
+// properties to return. Defaults to noAcl.
+//
+// Possible values:
+// "full" - Include all properties.
+// "noAcl" - Omit the owner, acl property.
+func (c *ObjectsWatchAllCall) Projection(projection string) *ObjectsWatchAllCall {
+ c.urlParams_.Set("projection", projection)
+ return c
+}
+
+// Versions sets the optional parameter "versions": If true, lists all
+// versions of an object as distinct results. The default is false. For
+// more information, see Object Versioning.
+func (c *ObjectsWatchAllCall) Versions(versions bool) *ObjectsWatchAllCall {
+ c.urlParams_.Set("versions", fmt.Sprint(versions))
+ return c
+}
+
+// Fields allows partial responses to be retrieved. See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ObjectsWatchAllCall) Fields(s ...googleapi.Field) *ObjectsWatchAllCall {
+ c.urlParams_.Set("fields", googleapi.CombineFields(s))
+ return c
+}
+
+// Context sets the context to be used in this call's Do method. Any
+// pending HTTP request will be aborted if the provided context is
+// canceled.
+func (c *ObjectsWatchAllCall) Context(ctx context.Context) *ObjectsWatchAllCall {
+ c.ctx_ = ctx
+ return c
+}
+
+func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) {
+ reqHeaders := make(http.Header)
+ reqHeaders.Set("User-Agent", c.s.userAgent())
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.channel)
+ if err != nil {
+ return nil, err
+ }
+ reqHeaders.Set("Content-Type", "application/json")
+ c.urlParams_.Set("alt", alt)
+ urls := googleapi.ResolveRelative(c.s.BasePath, "b/{bucket}/o/watch")
+ urls += "?" + c.urlParams_.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ req.Header = reqHeaders
+ googleapi.Expand(req.URL, map[string]string{
+ "bucket": c.bucket,
+ })
+ if c.ctx_ != nil {
+ return ctxhttp.Do(c.ctx_, c.s.client, req)
+ }
+ return c.s.client.Do(req)
+}
+
+// Do executes the "storage.objects.watchAll" call.
+// Exactly one of *Channel or error will be non-nil. Any non-2xx status
+// code is an error. Response headers are in either
+// *Channel.ServerResponse.Header or (if a response was returned at all)
+// in error.(*googleapi.Error).Header. Use googleapi.IsNotModified to
+// check whether the returned error was because http.StatusNotModified
+// was returned.
+func (c *ObjectsWatchAllCall) Do(opts ...googleapi.CallOption) (*Channel, error) {
+ gensupport.SetOptions(c.urlParams_, opts...)
+ res, err := c.doRequest("json")
+ if res != nil && res.StatusCode == http.StatusNotModified {
+ if res.Body != nil {
+ res.Body.Close()
+ }
+ return nil, &googleapi.Error{
+ Code: res.StatusCode,
+ Header: res.Header,
+ }
+ }
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ ret := &Channel{
+ ServerResponse: googleapi.ServerResponse{
+ Header: res.Header,
+ HTTPStatusCode: res.StatusCode,
+ },
+ }
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Watch for changes on all objects in a bucket.",
+ // "httpMethod": "POST",
+ // "id": "storage.objects.watchAll",
+ // "parameterOrder": [
+ // "bucket"
+ // ],
+ // "parameters": {
+ // "bucket": {
+ // "description": "Name of the bucket in which to look for objects.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "delimiter": {
+ // "description": "Returns results in a directory-like mode. items will contain only objects whose names, aside from the prefix, do not contain delimiter. Objects whose names, aside from the prefix, contain delimiter will have their name, truncated after the delimiter, returned in prefixes. Duplicate prefixes are omitted.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "description": "Maximum number of items plus prefixes to return. As duplicate prefixes are omitted, fewer total results may be returned than requested. The default value of this parameter is 1,000 items.",
+ // "format": "uint32",
+ // "location": "query",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "A previously-returned page token representing part of the larger set of results to view.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "prefix": {
+ // "description": "Filter results to objects whose names begin with this prefix.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "projection": {
+ // "description": "Set of properties to return. Defaults to noAcl.",
+ // "enum": [
+ // "full",
+ // "noAcl"
+ // ],
+ // "enumDescriptions": [
+ // "Include all properties.",
+ // "Omit the owner, acl property."
+ // ],
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "versions": {
+ // "description": "If true, lists all versions of an object as distinct results. The default is false. For more information, see Object Versioning.",
+ // "location": "query",
+ // "type": "boolean"
+ // }
+ // },
+ // "path": "b/{bucket}/o/watch",
+ // "request": {
+ // "$ref": "Channel",
+ // "parameterName": "resource"
+ // },
+ // "response": {
+ // "$ref": "Channel"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform",
+ // "https://www.googleapis.com/auth/cloud-platform.read-only",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_only",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ],
+ // "supportsSubscription": true
+ // }
+
+}
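+
+// A minimal usage sketch (assuming svc is a *storage.Service; the channel ID
+// and notification address are placeholders, and Address must be a reachable
+// HTTPS endpoint):
+//
+//	ch, err := svc.Objects.WatchAll("my-bucket", &storage.Channel{
+//		Id:      "my-channel-id",
+//		Type:    "web_hook",
+//		Address: "https://example.com/notify",
+//	}).Prefix("logs/").Do()
+//	_, _ = ch, err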
diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/google.golang.org/appengine/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/google.golang.org/appengine/README.md b/vendor/google.golang.org/appengine/README.md
new file mode 100644
index 0000000..1efd955
--- /dev/null
+++ b/vendor/google.golang.org/appengine/README.md
@@ -0,0 +1,71 @@
+# Go App Engine packages
+
+[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)
+
+This repository supports the Go runtime on App Engine,
+including both the standard App Engine and the
+"App Engine flexible environment" (formerly known as "Managed VMs").
+It provides APIs for interacting with App Engine services.
+Its canonical import path is `google.golang.org/appengine`.
+
+See https://cloud.google.com/appengine/docs/go/
+for more information.
+
+File issue reports and feature requests on the [Google App Engine issue
+tracker](https://code.google.com/p/googleappengine/issues/entry?template=Go%20defect).
+
+## Directory structure
+The top level directory of this repository is the `appengine` package. It
+contains the
+basic APIs (e.g. `appengine.NewContext`) that apply across APIs. Specific API
+packages are in subdirectories (e.g. `datastore`).
+
+There is an `internal` subdirectory that contains service protocol buffers,
+plus packages required for connectivity to make API calls. App Engine apps
+should not directly import any package under `internal`.
+
+## Updating a Go App Engine app
+
+This section describes how to update an older Go App Engine app to use
+these packages. A provided tool, `aefix`, can help automate steps 2 and 3
+(run `go get google.golang.org/appengine/cmd/aefix` to install it), but
+read the details below since `aefix` can't perform all the changes.
+
+### 1. Update YAML files (App Engine flexible environment / Managed VMs only)
+
+The `app.yaml` file (and YAML files for modules) should have this new line added:
+```
+vm: true
+```
+See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details.
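+
+A fuller `app.yaml` sketch (the `runtime`, `api_version`, and handler lines
+follow the conventional layout for Go apps of this era; adjust to your app):
+```
+runtime: go
+api_version: go1
+vm: true
+
+handlers:
+- url: /.*
+  script: _go_app
+```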
+
+### 2. Update import paths
+
+The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
+You will need to update your code to use import paths starting with that; for instance,
+code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
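+
+For instance:
+```
+// Before:
+import "appengine/datastore"
+
+// After:
+import "google.golang.org/appengine/datastore"
+```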
+
+### 3. Update code using deprecated, removed or modified APIs
+
+Most App Engine services are available with exactly the same API.
+A few APIs were cleaned up, and some are not available yet.
+This list summarises the differences:
+
+* `appengine.Context` has been replaced with the `Context` type from `golang.org/x/net/context`.
+* Logging methods that were on `appengine.Context` are now functions in `google.golang.org/appengine/log`; see the example after this list.
+* `appengine.Timeout` has been removed. Use `context.WithTimeout` instead.
+* `appengine.Datacenter` now takes a `context.Context` argument.
+* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
+* `delay.Call` now returns an error.
+* `search.FieldLoadSaver` now handles document metadata.
+* `urlfetch.Transport` no longer has a Deadline field; set a deadline on the
+ `context.Context` instead.
+* `aetest` no longer declares its own Context type, and uses the standard one instead.
+* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
+ deprecated and unused for a long time.
+* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
+ Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
+* Most of `appengine/file` and parts of `appengine/blobstore` are deprecated.
+ Use [Google Cloud Storage](https://godoc.org/google.golang.org/cloud/storage) instead.
+* `appengine/socket` is not required on App Engine flexible environment / Managed VMs.
+ Use the standard `net` package instead.
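+
+For example, the Context and logging changes above combine like this in a
+typical handler (a sketch; the handler name and log message are placeholders):
+```
+import (
+    "net/http"
+
+    "google.golang.org/appengine"
+    "google.golang.org/appengine/log"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+    ctx := appengine.NewContext(r) // now a context.Context
+    log.Infof(ctx, "serving %s", r.URL.Path)
+}
+```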
diff --git a/vendor/google.golang.org/appengine/appengine.go b/vendor/google.golang.org/appengine/appengine.go
new file mode 100644
index 0000000..aa23a7a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine.go
@@ -0,0 +1,76 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package appengine provides basic functionality for Google App Engine.
+//
+// For more information on how to write Go apps for Google App Engine, see:
+// https://cloud.google.com/appengine/docs/go/
+package appengine // import "google.golang.org/appengine"
+
+import (
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// IsDevAppServer reports whether the App Engine app is running in the
+// development App Server.
+func IsDevAppServer() bool {
+ return internal.IsDevAppServer()
+}
+
+// NewContext returns a context for an in-flight HTTP request.
+// This function is cheap.
+func NewContext(req *http.Request) context.Context {
+ return WithContext(context.Background(), req)
+}
+
+// WithContext returns a copy of the parent context
+// and associates it with an in-flight HTTP request.
+// This function is cheap.
+func WithContext(parent context.Context, req *http.Request) context.Context {
+ return internal.WithContext(parent, req)
+}
+
+// TODO(dsymonds): Add a Call function here? Otherwise other packages can't access internal.Call.
+
+// BlobKey is a key for a blobstore blob.
+//
+// Conceptually, this type belongs in the blobstore package, but it lives in
+// the appengine package to avoid a circular dependency: blobstore depends on
+// datastore, and datastore needs to refer to the BlobKey type.
+type BlobKey string
+
+// GeoPoint represents a location as latitude/longitude in degrees.
+type GeoPoint struct {
+ Lat, Lng float64
+}
+
+// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
+func (g GeoPoint) Valid() bool {
+ return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
+}
+
+// APICallFunc defines a function type for handling an API call.
+// See WithCallOverride.
+type APICallFunc func(ctx context.Context, service, method string, in, out proto.Message) error
+
+// WithAPICallFunc returns a copy of the parent context
+// that will cause API calls to invoke f instead of their normal operation.
+//
+// This is intended for advanced users only.
+func WithAPICallFunc(ctx context.Context, f APICallFunc) context.Context {
+ return internal.WithCallOverride(ctx, internal.CallOverrideFunc(f))
+}
+
+// APICall performs an API call.
+//
+// This is not intended for general use; it is exported for use in conjunction
+// with WithAPICallFunc.
+func APICall(ctx context.Context, service, method string, in, out proto.Message) error {
+ return internal.Call(ctx, service, method, in, out)
+}
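+
+// A sketch of using WithAPICallFunc to stub or trace API calls, e.g. in tests
+// (the override body here is a no-op placeholder):
+//
+//	ctx = WithAPICallFunc(ctx, func(ctx context.Context, service, method string, in, out proto.Message) error {
+//		// Returning nil fakes a successful call without populating out.
+//		return nil
+//	})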
diff --git a/vendor/google.golang.org/appengine/appengine_vm.go b/vendor/google.golang.org/appengine/appengine_vm.go
new file mode 100644
index 0000000..e4399ed
--- /dev/null
+++ b/vendor/google.golang.org/appengine/appengine_vm.go
@@ -0,0 +1,56 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package appengine
+
+import (
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// The comment below must not be changed.
+// It is used by go-app-builder to recognise that this package has
+// the Main function to use in the synthetic main.
+// The gophers party all night; the rabbits provide the beats.
+
+// Main is the principal entry point for an app running in App Engine "flexible environment".
+// It installs a trivial health checker if one isn't already registered,
+// and starts listening on port 8080 (overridden by the $PORT environment
+// variable).
+//
+// See https://cloud.google.com/appengine/docs/flexible/custom-runtimes#health_check_requests
+// for details on how to do your own health checking.
+//
+// Main never returns.
+//
+// Main is designed so that the app's main package looks like this:
+//
+// package main
+//
+// import (
+// "google.golang.org/appengine"
+//
+// _ "myapp/package0"
+// _ "myapp/package1"
+// )
+//
+// func main() {
+// appengine.Main()
+// }
+//
+// The "myapp/packageX" packages are expected to register HTTP handlers
+// in their init functions.
+func Main() {
+ internal.Main()
+}
+
+// BackgroundContext returns a context not associated with a request.
+// This should only be used when not servicing a request.
+// This only works in App Engine "flexible environment".
+func BackgroundContext() context.Context {
+ return internal.BackgroundContext()
+}
diff --git a/vendor/google.golang.org/appengine/errors.go b/vendor/google.golang.org/appengine/errors.go
new file mode 100644
index 0000000..16d0772
--- /dev/null
+++ b/vendor/google.golang.org/appengine/errors.go
@@ -0,0 +1,46 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// This file provides error functions for common API failure modes.
+
+package appengine
+
+import (
+ "fmt"
+
+ "google.golang.org/appengine/internal"
+)
+
+// IsOverQuota reports whether err represents an API call failure
+// due to insufficient available quota.
+func IsOverQuota(err error) bool {
+ callErr, ok := err.(*internal.CallError)
+ return ok && callErr.Code == 4
+}
+
+// MultiError is returned by batch operations when there are errors with
+// particular elements. Errors will be in a one-to-one correspondence with
+// the input elements; successful elements will have a nil entry.
+type MultiError []error
+
+func (m MultiError) Error() string {
+ s, n := "", 0
+ for _, e := range m {
+ if e != nil {
+ if n == 0 {
+ s = e.Error()
+ }
+ n++
+ }
+ }
+ switch n {
+ case 0:
+ return "(0 errors)"
+ case 1:
+ return s
+ case 2:
+ return s + " (and 1 other error)"
+ }
+ return fmt.Sprintf("%s (and %d other errors)", s, n-1)
+}
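+
+// A sketch of unpacking a MultiError from a batch call (datastore.GetMulti
+// from google.golang.org/appengine/datastore is one such caller; the keys
+// and dst slices are placeholders):
+//
+//	err := datastore.GetMulti(ctx, keys, dst)
+//	if me, ok := err.(appengine.MultiError); ok {
+//		for i, e := range me {
+//			if e != nil {
+//				// The element for keys[i] failed; nil entries succeeded.
+//			}
+//		}
+//	}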
diff --git a/vendor/google.golang.org/appengine/identity.go b/vendor/google.golang.org/appengine/identity.go
new file mode 100644
index 0000000..b8dcf8f
--- /dev/null
+++ b/vendor/google.golang.org/appengine/identity.go
@@ -0,0 +1,142 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "time"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/app_identity"
+ modpb "google.golang.org/appengine/internal/modules"
+)
+
+// AppID returns the application ID for the current application.
+// The string will be a plain application ID (e.g. "appid"), with a
+// domain prefix for custom domain deployments (e.g. "example.com:appid").
+func AppID(c context.Context) string { return internal.AppID(c) }
+
+// DefaultVersionHostname returns the standard hostname of the default version
+// of the current application (e.g. "my-app.appspot.com"). This is suitable for
+// use in constructing URLs.
+func DefaultVersionHostname(c context.Context) string {
+ return internal.DefaultVersionHostname(c)
+}
+
+// ModuleName returns the module name of the current instance.
+func ModuleName(c context.Context) string {
+ return internal.ModuleName(c)
+}
+
+// ModuleHostname returns a hostname of a module instance.
+// If module is the empty string, it refers to the module of the current instance.
+// If version is empty, it refers to the version of the current instance if valid,
+// or the default version of the module of the current instance.
+// If instance is empty, ModuleHostname returns the load-balancing hostname.
+func ModuleHostname(c context.Context, module, version, instance string) (string, error) {
+ req := &modpb.GetHostnameRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ if instance != "" {
+ req.Instance = &instance
+ }
+ res := &modpb.GetHostnameResponse{}
+ if err := internal.Call(c, "modules", "GetHostname", req, res); err != nil {
+ return "", err
+ }
+ return *res.Hostname, nil
+}
+
+// VersionID returns the version ID for the current application.
+// It will be of the form "X.Y", where X is specified in app.yaml,
+// and Y is a number generated when each version of the app is uploaded.
+// It does not include a module name.
+func VersionID(c context.Context) string { return internal.VersionID(c) }
+
+// InstanceID returns a mostly-unique identifier for this instance.
+func InstanceID() string { return internal.InstanceID() }
+
+// Datacenter returns an identifier for the datacenter that the instance is running in.
+func Datacenter(c context.Context) string { return internal.Datacenter(c) }
+
+// ServerSoftware returns the App Engine release version.
+// In production, it looks like "Google App Engine/X.Y.Z".
+// In the development appserver, it looks like "Development/X.Y".
+func ServerSoftware() string { return internal.ServerSoftware() }
+
+// RequestID returns a string that uniquely identifies the request.
+func RequestID(c context.Context) string { return internal.RequestID(c) }
+
+// AccessToken generates an OAuth2 access token for the specified scopes on
+// behalf of service account of this application. This token will expire after
+// the returned time.
+func AccessToken(c context.Context, scopes ...string) (token string, expiry time.Time, err error) {
+ req := &pb.GetAccessTokenRequest{Scope: scopes}
+ res := &pb.GetAccessTokenResponse{}
+
+ err = internal.Call(c, "app_identity_service", "GetAccessToken", req, res)
+ if err != nil {
+ return "", time.Time{}, err
+ }
+ return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil
+}
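+
+// A sketch of requesting a token (the scope URL is illustrative):
+//
+//	tok, expiry, err := AccessToken(c, "https://www.googleapis.com/auth/devstorage.read_only")
+//	// The token is valid until expiry and must be refreshed afterwards.
+//	_, _, _ = tok, expiry, err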
+
+// Certificate represents a public certificate for the app.
+type Certificate struct {
+ KeyName string
+ Data []byte // PEM-encoded X.509 certificate
+}
+
+// PublicCertificates retrieves the public certificates for the app.
+// They can be used to verify a signature returned by SignBytes.
+func PublicCertificates(c context.Context) ([]Certificate, error) {
+ req := &pb.GetPublicCertificateForAppRequest{}
+ res := &pb.GetPublicCertificateForAppResponse{}
+ if err := internal.Call(c, "app_identity_service", "GetPublicCertificatesForApp", req, res); err != nil {
+ return nil, err
+ }
+ var cs []Certificate
+ for _, pc := range res.PublicCertificateList {
+ cs = append(cs, Certificate{
+ KeyName: pc.GetKeyName(),
+ Data: []byte(pc.GetX509CertificatePem()),
+ })
+ }
+ return cs, nil
+}
+
+// ServiceAccount returns a string representing the service account name, in
+// the form of an email address (typically app_id@appspot.gserviceaccount.com).
+func ServiceAccount(c context.Context) (string, error) {
+ req := &pb.GetServiceAccountNameRequest{}
+ res := &pb.GetServiceAccountNameResponse{}
+
+ err := internal.Call(c, "app_identity_service", "GetServiceAccountName", req, res)
+ if err != nil {
+ return "", err
+ }
+ return res.GetServiceAccountName(), err
+}
+
+// SignBytes signs bytes using a private key unique to your application.
+func SignBytes(c context.Context, bytes []byte) (keyName string, signature []byte, err error) {
+ req := &pb.SignForAppRequest{BytesToSign: bytes}
+ res := &pb.SignForAppResponse{}
+
+ if err := internal.Call(c, "app_identity_service", "SignForApp", req, res); err != nil {
+ return "", nil, err
+ }
+ return res.GetKeyName(), res.GetSignatureBytes(), nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name)
+ internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name)
+}
diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go
new file mode 100644
index 0000000..a2a4b4c
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api.go
@@ -0,0 +1,509 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const (
+ apiPath = "/rpc_http"
+)
+
+var (
+ // Incoming headers.
+ ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
+ dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
+ traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context")
+ curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
+ remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
+
+ // Outgoing headers.
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
+ apiEndpointHeaderValue = []string{"app-engine-apis"}
+ apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
+ apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
+ apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
+ apiContentType = http.CanonicalHeaderKey("Content-Type")
+ apiContentTypeValue = []string{"application/octet-stream"}
+
+ apiHTTPClient = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: limitDial,
+ },
+ }
+)
+
+func apiURL() *url.URL {
+ host, port := "appengine.googleapis.internal", "10001"
+ if h := os.Getenv("API_HOST"); h != "" {
+ host = h
+ }
+ if p := os.Getenv("API_PORT"); p != "" {
+ port = p
+ }
+ return &url.URL{
+ Scheme: "http",
+ Host: host + ":" + port,
+ Path: apiPath,
+ }
+}
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ c := &context{
+ req: r,
+ outHeader: w.Header(),
+ apiURL: apiURL(),
+ logger: globalLogger(),
+ }
+
+ ctxs.Lock()
+ ctxs.m[r] = c
+ ctxs.Unlock()
+ defer func() {
+ ctxs.Lock()
+ delete(ctxs.m, r)
+ ctxs.Unlock()
+ }()
+
+ // Patch up RemoteAddr so it looks reasonable.
+ if addr := r.Header.Get(userIPHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else {
+ // Should not normally reach here, but pick a sensible default anyway.
+ r.RemoteAddr = "127.0.0.1"
+ }
+ // The address in the headers will most likely be of these forms:
+ // 123.123.123.123
+ // 2001:db8::1
+ // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+ if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+ // Assume the remote address is only a host; add a default port.
+ r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+ }
+
+ executeRequestSafely(c, r)
+ c.outHeader = nil // make sure header changes aren't respected any more
+
+ // Avoid nil Write call if c.Write is never called.
+ if c.outCode != 0 {
+ w.WriteHeader(c.outCode)
+ }
+ if c.outBody != nil {
+ w.Write(c.outBody)
+ }
+}
+
+func executeRequestSafely(c *context, r *http.Request) {
+ defer func() {
+ if x := recover(); x != nil {
+ logf(c, 4, "%s", renderPanic(x)) // 4 == critical
+ c.outCode = 500
+ }
+ }()
+
+ http.DefaultServeMux.ServeHTTP(c, r)
+}
+
+func renderPanic(x interface{}) string {
+ buf := make([]byte, 16<<10) // 16 KB should be plenty
+ buf = buf[:runtime.Stack(buf, false)]
+
+ // Remove the first few stack frames:
+ // this func
+ // the recover closure in the caller
+ // That will root the stack trace at the site of the panic.
+ const (
+ skipStart = "internal.renderPanic"
+ skipFrames = 2
+ )
+ start := bytes.Index(buf, []byte(skipStart))
+ p := start
+ for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
+ p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
+ if p < 0 {
+ break
+ }
+ }
+ if p >= 0 {
+ // buf[start:p+1] is the block to remove.
+ // Copy buf[p+1:] over buf[start:] and shrink buf.
+ copy(buf[start:], buf[p+1:])
+ buf = buf[:len(buf)-(p+1-start)]
+ }
+
+ // Add panic heading.
+ head := fmt.Sprintf("panic: %v\n\n", x)
+ if len(head) > len(buf) {
+ // Extremely unlikely to happen.
+ return head
+ }
+ copy(buf[len(head):], buf)
+ copy(buf, head)
+
+ return string(buf)
+}
+
+var ctxs = struct {
+ sync.Mutex
+ m map[*http.Request]*context
+ bg *context // background context, lazily initialized
+ // dec is used by tests to decorate the netcontext.Context returned
+ // for a given request. This allows tests to add overrides (such as
+ // WithAppIDOverride) to the context. The map is nil outside tests.
+ dec map[*http.Request]func(netcontext.Context) netcontext.Context
+}{
+ m: make(map[*http.Request]*context),
+}
+
+// context represents the context of an in-flight HTTP request.
+// It implements the appengine.Context and http.ResponseWriter interfaces.
+type context struct {
+ req *http.Request
+ logger *jsonLogger
+
+ outCode int
+ outHeader http.Header
+ outBody []byte
+
+ apiURL *url.URL
+}
+
+var contextKey = "holds a *context"
+
+func fromContext(ctx netcontext.Context) *context {
+ c, _ := ctx.Value(&contextKey).(*context)
+ return c
+}
+
+func withContext(parent netcontext.Context, c *context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+ if ns := c.req.Header.Get(curNamespaceHeader); ns != "" {
+ ctx = withNamespace(ctx, ns)
+ }
+ return ctx
+}
+
+func toContext(c *context) netcontext.Context {
+ return withContext(netcontext.Background(), c)
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ return c.req.Header
+ }
+ return nil
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ ctxs.Lock()
+ c := ctxs.m[req]
+ d := ctxs.dec[req]
+ ctxs.Unlock()
+
+ if d != nil {
+ parent = d(parent)
+ }
+
+ if c == nil {
+ // Someone passed in an http.Request that is not in-flight.
+ // We panic here rather than panicking at a later point
+ // so that stack traces will be more sensible.
+ log.Panic("appengine: NewContext passed an unknown http.Request")
+ }
+ return withContext(parent, c)
+}
+
+func BackgroundContext() netcontext.Context {
+ ctxs.Lock()
+ defer ctxs.Unlock()
+
+ if ctxs.bg != nil {
+ return toContext(ctxs.bg)
+ }
+
+ // Compute background security ticket.
+ appID := partitionlessAppID()
+ escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+ majVersion := VersionID(nil)
+ if i := strings.Index(majVersion, "."); i > 0 {
+ majVersion = majVersion[:i]
+ }
+ ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID())
+
+ ctxs.bg = &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{ticket},
+ },
+ },
+ apiURL: apiURL(),
+ logger: globalLogger(),
+ }
+
+ return toContext(ctxs.bg)
+}
+
+// RegisterTestRequest registers the HTTP request req for testing, such that
+// any API calls are sent to the provided URL. It returns a closure to delete
+// the registration.
+// It should only be used by aetest package.
+func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) func() {
+ c := &context{
+ req: req,
+ apiURL: apiURL,
+ logger: globalLogger(),
+ }
+ ctxs.Lock()
+ defer ctxs.Unlock()
+ if _, ok := ctxs.m[req]; ok {
+ log.Panic("req already associated with context")
+ }
+ if _, ok := ctxs.dec[req]; ok {
+ log.Panic("req already associated with context")
+ }
+ if ctxs.dec == nil {
+ ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context)
+ }
+ ctxs.m[req] = c
+ ctxs.dec[req] = decorate
+
+ return func() {
+ ctxs.Lock()
+ delete(ctxs.m, req)
+ delete(ctxs.dec, req)
+ ctxs.Unlock()
+ }
+}
+
+var errTimeout = &CallError{
+ Detail: "Deadline exceeded",
+ Code: int32(remotepb.RpcError_CANCELLED),
+ Timeout: true,
+}
+
+func (c *context) Header() http.Header { return c.outHeader }
+
+// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
+// codes do not permit a response body (nor response entity headers such as
+// Content-Length, Content-Type, etc).
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+func (c *context) Write(b []byte) (int, error) {
+ if c.outCode == 0 {
+ c.WriteHeader(http.StatusOK)
+ }
+ if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ c.outBody = append(c.outBody, b...)
+ return len(b), nil
+}
+
+func (c *context) WriteHeader(code int) {
+ if c.outCode != 0 {
+ logf(c, 3, "WriteHeader called multiple times on request.") // error level
+ return
+ }
+ c.outCode = code
+}
+
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+ hreq := &http.Request{
+ Method: "POST",
+ URL: c.apiURL,
+ Header: http.Header{
+ apiEndpointHeader: apiEndpointHeaderValue,
+ apiMethodHeader: apiMethodHeaderValue,
+ apiContentType: apiContentTypeValue,
+ apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
+ },
+ Body: ioutil.NopCloser(bytes.NewReader(body)),
+ ContentLength: int64(len(body)),
+ Host: c.apiURL.Host,
+ }
+ if info := c.req.Header.Get(dapperHeader); info != "" {
+ hreq.Header.Set(dapperHeader, info)
+ }
+ if info := c.req.Header.Get(traceHeader); info != "" {
+ hreq.Header.Set(traceHeader, info)
+ }
+
+ tr := apiHTTPClient.Transport.(*http.Transport)
+
+ var timedOut int32 // atomic; set to 1 if timed out
+ t := time.AfterFunc(timeout, func() {
+ atomic.StoreInt32(&timedOut, 1)
+ tr.CancelRequest(hreq)
+ })
+ defer t.Stop()
+ defer func() {
+ // Check if timeout was exceeded.
+ if atomic.LoadInt32(&timedOut) != 0 {
+ err = errTimeout
+ }
+ }()
+
+ hresp, err := apiHTTPClient.Do(hreq)
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ defer hresp.Body.Close()
+ hrespBody, err := ioutil.ReadAll(hresp.Body)
+ if hresp.StatusCode != 200 {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge response bad: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return hrespBody, nil
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errors.New("not an App Engine context")
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ // Default RPC timeout is 60s.
+ timeout := 60 * time.Second
+ if deadline, ok := ctx.Deadline(); ok {
+ timeout = deadline.Sub(time.Now())
+ }
+
+ data, err := proto.Marshal(in)
+ if err != nil {
+ return err
+ }
+
+ ticket := c.req.Header.Get(ticketHeader)
+ req := &remotepb.Request{
+ ServiceName: &service,
+ Method: &method,
+ Request: data,
+ RequestId: &ticket,
+ }
+ hreqBody, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ hrespBody, err := c.post(hreqBody, timeout)
+ if err != nil {
+ return err
+ }
+
+ res := &remotepb.Response{}
+ if err := proto.Unmarshal(hrespBody, res); err != nil {
+ return err
+ }
+ if res.RpcError != nil {
+ ce := &CallError{
+ Detail: res.RpcError.GetDetail(),
+ Code: *res.RpcError.Code,
+ }
+ switch remotepb.RpcError_ErrorCode(ce.Code) {
+ case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
+ ce.Timeout = true
+ }
+ return ce
+ }
+ if res.ApplicationError != nil {
+ return &APIError{
+ Service: *req.ServiceName,
+ Detail: res.ApplicationError.GetDetail(),
+ Code: *res.ApplicationError.Code,
+ }
+ }
+ if res.Exception != nil || res.JavaException != nil {
+ // This shouldn't happen, but let's be defensive.
+ return &CallError{
+ Detail: "service bridge returned exception",
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return proto.Unmarshal(res.Response, out)
+}
+
+func (c *context) Request() *http.Request {
+ return c.req
+}
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return toContext(&context{
+ req: req,
+ logger: testLogger,
+ })
+}
diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go
new file mode 100644
index 0000000..597f66e
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_classic.go
@@ -0,0 +1,159 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "time"
+
+ "appengine"
+ "appengine_internal"
+ basepb "appengine_internal/base"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+)
+
+var contextKey = "holds an appengine.Context"
+
+func fromContext(ctx netcontext.Context) appengine.Context {
+ c, _ := ctx.Value(&contextKey).(appengine.Context)
+ return c
+}
+
+// This is only for classic App Engine adapters.
+func ClassicContextFromContext(ctx netcontext.Context) appengine.Context {
+ return fromContext(ctx)
+}
+
+func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context {
+ ctx := netcontext.WithValue(parent, &contextKey, c)
+
+ s := &basepb.StringProto{}
+ c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil)
+ if ns := s.GetValue(); ns != "" {
+ ctx = NamespacedContext(ctx, ns)
+ }
+
+ return ctx
+}
+
+func IncomingHeaders(ctx netcontext.Context) http.Header {
+ if c := fromContext(ctx); c != nil {
+ if req, ok := c.Request().(*http.Request); ok {
+ return req.Header
+ }
+ }
+ return nil
+}
+
+func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context {
+ c := appengine.NewContext(req)
+ return withContext(parent, c)
+}
+
+type testingContext struct {
+ appengine.Context
+
+ req *http.Request
+}
+
+func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" }
+func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error {
+ if service == "__go__" && method == "GetNamespace" {
+ return nil
+ }
+ return fmt.Errorf("testingContext: unsupported Call")
+}
+func (t *testingContext) Request() interface{} { return t.req }
+
+func ContextForTesting(req *http.Request) netcontext.Context {
+ return withContext(netcontext.Background(), &testingContext{req: req})
+}
+
+func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+ if ns := NamespaceFromContext(ctx); ns != "" {
+ if fn, ok := NamespaceMods[service]; ok {
+ fn(in, ns)
+ }
+ }
+
+ if f, ctx, ok := callOverrideFromContext(ctx); ok {
+ return f(ctx, service, method, in, out)
+ }
+
+ // Handle already-done contexts quickly.
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ default:
+ }
+
+ c := fromContext(ctx)
+ if c == nil {
+ // Give a good error message rather than a panic lower down.
+ return errors.New("not an App Engine context")
+ }
+
+ // Apply transaction modifications if we're in a transaction.
+ if t := transactionFromContext(ctx); t != nil {
+ if t.finished {
+ return errors.New("transaction context has expired")
+ }
+ applyTransaction(in, &t.transaction)
+ }
+
+ var opts *appengine_internal.CallOptions
+ if d, ok := ctx.Deadline(); ok {
+ opts = &appengine_internal.CallOptions{
+ Timeout: d.Sub(time.Now()),
+ }
+ }
+
+ err := c.Call(service, method, in, out, opts)
+ switch v := err.(type) {
+ case *appengine_internal.APIError:
+ return &APIError{
+ Service: v.Service,
+ Detail: v.Detail,
+ Code: v.Code,
+ }
+ case *appengine_internal.CallError:
+ return &CallError{
+ Detail: v.Detail,
+ Code: v.Code,
+ Timeout: v.Timeout,
+ }
+ }
+ return err
+}
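+
+// Sketch of the dispatch order implemented above (an assumption, not upstream
+// text): Call consults any stacked override first, then rejects already-done
+// contexts, then requires a classic appengine.Context, e.g.
+//
+//	s := &basepb.StringProto{}
+//	if err := Call(ctx, "__go__", "GetNamespace", &basepb.VoidProto{}, s); err != nil {
+//		// not running under classic App Engine, or the call failed
+//	}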
+
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ panic("handleHTTP called; this should be impossible")
+}
+
+func logf(c appengine.Context, level int64, format string, args ...interface{}) {
+ var fn func(format string, args ...interface{})
+ switch level {
+ case 0:
+ fn = c.Debugf
+ case 1:
+ fn = c.Infof
+ case 2:
+ fn = c.Warningf
+ case 3:
+ fn = c.Errorf
+ case 4:
+ fn = c.Criticalf
+ default:
+ // This shouldn't happen.
+ fn = c.Criticalf
+ }
+ fn(format, args...)
+}
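+
+// Illustrative note (an assumption, not upstream text): levels 0-4 map to
+// Debugf, Infof, Warningf, Errorf and Criticalf respectively; Logf in
+// api_common.go, below, falls back to this function when no log override is
+// installed on the context.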
diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go
new file mode 100644
index 0000000..2db33a7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/api_common.go
@@ -0,0 +1,86 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+)
+
+type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error
+
+var callOverrideKey = "holds []CallOverrideFunc"
+
+func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context {
+ // We avoid appending to any existing call override
+ // so we don't risk overwriting a popped stack below.
+ var cofs []CallOverrideFunc
+ if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok {
+ cofs = append(cofs, uf...)
+ }
+ cofs = append(cofs, f)
+ return netcontext.WithValue(ctx, &callOverrideKey, cofs)
+}
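+
+// Illustrative sketch (an assumption, not upstream text): overrides pop in
+// LIFO order, and an override can delegate to the next layer by calling Call
+// on the context handed to it, since callOverrideFromContext hides the
+// just-popped entry, e.g.
+//
+//	ctx = WithCallOverride(ctx, func(ctx netcontext.Context, service, method string, in, out proto.Message) error {
+//		// inspect or rewrite the request here, then fall through
+//		return Call(ctx, service, method, in, out)
+//	})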
+
+func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) {
+ cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc)
+ if len(cofs) == 0 {
+ return nil, nil, false
+ }
+ // We found a list of overrides; grab the last, and reconstitute a
+ // context that will hide it.
+ f := cofs[len(cofs)-1]
+ ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1])
+ return f, ctx, true
+}
+
+type logOverrideFunc func(level int64, format string, args ...interface{})
+
+var logOverrideKey = "holds a logOverrideFunc"
+
+func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context {
+ return netcontext.WithValue(ctx, &logOverrideKey, f)
+}
+
+var appIDOverrideKey = "holds a string, being the full app ID"
+
+func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context {
+ return netcontext.WithValue(ctx, &appIDOverrideKey, appID)
+}
+
+var namespaceKey = "holds the namespace string"
+
+func withNamespace(ctx netcontext.Context, ns string) netcontext.Context {
+ return netcontext.WithValue(ctx, &namespaceKey, ns)
+}
+
+func NamespaceFromContext(ctx netcontext.Context) string {
+ // If there's no namespace, return the empty string.
+ ns, _ := ctx.Value(&namespaceKey).(string)
+ return ns
+}
+
+// FullyQualifiedAppID returns the fully-qualified application ID.
+// This may contain a partition prefix (e.g. "s~" for High Replication apps),
+// or a domain prefix (e.g. "example.com:").
+func FullyQualifiedAppID(ctx netcontext.Context) string {
+ if id, ok := ctx.Value(&appIDOverrideKey).(string); ok {
+ return id
+ }
+ return fullyQualifiedAppID(ctx)
+}
+
+func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) {
+ if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok {
+ f(level, format, args...)
+ return
+ }
+ logf(fromContext(ctx), level, format, args...)
+}
+
+// NamespacedContext wraps a Context to support namespaces.
+func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context {
+ return withNamespace(ctx, namespace)
+}
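+
+// Illustrative round trip (an assumption, not upstream text): the namespace is
+// stored under namespaceKey, so
+// NamespaceFromContext(NamespacedContext(ctx, "ns")) == "ns".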
diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go
new file mode 100644
index 0000000..11df8c0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_id.go
@@ -0,0 +1,28 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "strings"
+)
+
+func parseFullAppID(appid string) (partition, domain, displayID string) {
+ if i := strings.Index(appid, "~"); i != -1 {
+ partition, appid = appid[:i], appid[i+1:]
+ }
+ if i := strings.Index(appid, ":"); i != -1 {
+ domain, appid = appid[:i], appid[i+1:]
+ }
+ return partition, domain, appid
+}
+
+// appID returns "appid" or "domain.com:appid".
+func appID(fullAppID string) string {
+ _, dom, dis := parseFullAppID(fullAppID)
+ if dom != "" {
+ return dom + ":" + dis
+ }
+ return dis
+}
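+
+// Worked example (an assumption, not upstream text): for the hypothetical ID
+// "s~example.com:myapp", parseFullAppID yields partition "s", domain
+// "example.com" and displayID "myapp", so appID returns "example.com:myapp".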
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
new file mode 100644
index 0000000..87d9701
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
@@ -0,0 +1,296 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+// DO NOT EDIT!
+
+/*
+Package app_identity is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+
+It has these top-level messages:
+ AppIdentityServiceError
+ SignForAppRequest
+ SignForAppResponse
+ GetPublicCertificateForAppRequest
+ PublicCertificate
+ GetPublicCertificateForAppResponse
+ GetServiceAccountNameRequest
+ GetServiceAccountNameResponse
+ GetAccessTokenRequest
+ GetAccessTokenResponse
+ GetDefaultGcsBucketNameRequest
+ GetDefaultGcsBucketNameResponse
+*/
+package app_identity
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type AppIdentityServiceError_ErrorCode int32
+
+const (
+ AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0
+ AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9
+ AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000
+ AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001
+ AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002
+ AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003
+ AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005
+ AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006
+)
+
+var AppIdentityServiceError_ErrorCode_name = map[int32]string{
+ 0: "SUCCESS",
+ 9: "UNKNOWN_SCOPE",
+ 1000: "BLOB_TOO_LARGE",
+ 1001: "DEADLINE_EXCEEDED",
+ 1002: "NOT_A_VALID_APP",
+ 1003: "UNKNOWN_ERROR",
+ 1005: "NOT_ALLOWED",
+ 1006: "NOT_IMPLEMENTED",
+}
+var AppIdentityServiceError_ErrorCode_value = map[string]int32{
+ "SUCCESS": 0,
+ "UNKNOWN_SCOPE": 9,
+ "BLOB_TOO_LARGE": 1000,
+ "DEADLINE_EXCEEDED": 1001,
+ "NOT_A_VALID_APP": 1002,
+ "UNKNOWN_ERROR": 1003,
+ "NOT_ALLOWED": 1005,
+ "NOT_IMPLEMENTED": 1006,
+}
+
+func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode {
+ p := new(AppIdentityServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x AppIdentityServiceError_ErrorCode) String() string {
+ return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x))
+}
+func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = AppIdentityServiceError_ErrorCode(value)
+ return nil
+}
+
+type AppIdentityServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} }
+func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
+func (*AppIdentityServiceError) ProtoMessage() {}
+
+type SignForAppRequest struct {
+ BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} }
+func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*SignForAppRequest) ProtoMessage() {}
+
+func (m *SignForAppRequest) GetBytesToSign() []byte {
+ if m != nil {
+ return m.BytesToSign
+ }
+ return nil
+}
+
+type SignForAppResponse struct {
+ KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+ SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} }
+func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*SignForAppResponse) ProtoMessage() {}
+
+func (m *SignForAppResponse) GetKeyName() string {
+ if m != nil && m.KeyName != nil {
+ return *m.KeyName
+ }
+ return ""
+}
+
+func (m *SignForAppResponse) GetSignatureBytes() []byte {
+ if m != nil {
+ return m.SignatureBytes
+ }
+ return nil
+}
+
+type GetPublicCertificateForAppRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} }
+func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppRequest) ProtoMessage() {}
+
+type PublicCertificate struct {
+ KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+ X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PublicCertificate) Reset() { *m = PublicCertificate{} }
+func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
+func (*PublicCertificate) ProtoMessage() {}
+
+func (m *PublicCertificate) GetKeyName() string {
+ if m != nil && m.KeyName != nil {
+ return *m.KeyName
+ }
+ return ""
+}
+
+func (m *PublicCertificate) GetX509CertificatePem() string {
+ if m != nil && m.X509CertificatePem != nil {
+ return *m.X509CertificatePem
+ }
+ return ""
+}
+
+type GetPublicCertificateForAppResponse struct {
+ PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"`
+ MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} }
+func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppResponse) ProtoMessage() {}
+
+func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {
+ if m != nil {
+ return m.PublicCertificateList
+ }
+ return nil
+}
+
+func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 {
+ if m != nil && m.MaxClientCacheTimeInSecond != nil {
+ return *m.MaxClientCacheTimeInSecond
+ }
+ return 0
+}
+
+type GetServiceAccountNameRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} }
+func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameRequest) ProtoMessage() {}
+
+type GetServiceAccountNameResponse struct {
+ ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} }
+func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameResponse) ProtoMessage() {}
+
+func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
+ if m != nil && m.ServiceAccountName != nil {
+ return *m.ServiceAccountName
+ }
+ return ""
+}
+
+type GetAccessTokenRequest struct {
+ Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
+ ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"`
+ ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} }
+func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenRequest) ProtoMessage() {}
+
+func (m *GetAccessTokenRequest) GetScope() []string {
+ if m != nil {
+ return m.Scope
+ }
+ return nil
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountId() int64 {
+ if m != nil && m.ServiceAccountId != nil {
+ return *m.ServiceAccountId
+ }
+ return 0
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountName() string {
+ if m != nil && m.ServiceAccountName != nil {
+ return *m.ServiceAccountName
+ }
+ return ""
+}
+
+type GetAccessTokenResponse struct {
+ AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"`
+ ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} }
+func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenResponse) ProtoMessage() {}
+
+func (m *GetAccessTokenResponse) GetAccessToken() string {
+ if m != nil && m.AccessToken != nil {
+ return *m.AccessToken
+ }
+ return ""
+}
+
+func (m *GetAccessTokenResponse) GetExpirationTime() int64 {
+ if m != nil && m.ExpirationTime != nil {
+ return *m.ExpirationTime
+ }
+ return 0
+}
+
+type GetDefaultGcsBucketNameRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} }
+func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {}
+
+type GetDefaultGcsBucketNameResponse struct {
+ DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} }
+func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {}
+
+func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {
+ if m != nil && m.DefaultGcsBucketName != nil {
+ return *m.DefaultGcsBucketName
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
new file mode 100644
index 0000000..19610ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "app_identity";
+
+package appengine;
+
+message AppIdentityServiceError {
+ enum ErrorCode {
+ SUCCESS = 0;
+ UNKNOWN_SCOPE = 9;
+ BLOB_TOO_LARGE = 1000;
+ DEADLINE_EXCEEDED = 1001;
+ NOT_A_VALID_APP = 1002;
+ UNKNOWN_ERROR = 1003;
+ NOT_ALLOWED = 1005;
+ NOT_IMPLEMENTED = 1006;
+ }
+}
+
+message SignForAppRequest {
+ optional bytes bytes_to_sign = 1;
+}
+
+message SignForAppResponse {
+ optional string key_name = 1;
+ optional bytes signature_bytes = 2;
+}
+
+message GetPublicCertificateForAppRequest {
+}
+
+message PublicCertificate {
+ optional string key_name = 1;
+ optional string x509_certificate_pem = 2;
+}
+
+message GetPublicCertificateForAppResponse {
+ repeated PublicCertificate public_certificate_list = 1;
+ optional int64 max_client_cache_time_in_second = 2;
+}
+
+message GetServiceAccountNameRequest {
+}
+
+message GetServiceAccountNameResponse {
+ optional string service_account_name = 1;
+}
+
+message GetAccessTokenRequest {
+ repeated string scope = 1;
+ optional int64 service_account_id = 2;
+ optional string service_account_name = 3;
+}
+
+message GetAccessTokenResponse {
+ optional string access_token = 1;
+ optional int64 expiration_time = 2;
+}
+
+message GetDefaultGcsBucketNameRequest {
+}
+
+message GetDefaultGcsBucketNameResponse {
+ optional string default_gcs_bucket_name = 1;
+}
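+
+// Illustrative note (an assumption, not upstream text): each optional field
+// here compiles to a pointer field with a nil-safe Get* accessor in the
+// generated app_identity_service.pb.go above.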
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
new file mode 100644
index 0000000..36a1956
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go
@@ -0,0 +1,133 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/base/api_base.proto
+// DO NOT EDIT!
+
+/*
+Package base is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/base/api_base.proto
+
+It has these top-level messages:
+ StringProto
+ Integer32Proto
+ Integer64Proto
+ BoolProto
+ DoubleProto
+ BytesProto
+ VoidProto
+*/
+package base
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type StringProto struct {
+ Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StringProto) Reset() { *m = StringProto{} }
+func (m *StringProto) String() string { return proto.CompactTextString(m) }
+func (*StringProto) ProtoMessage() {}
+
+func (m *StringProto) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Integer32Proto struct {
+ Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
+func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer32Proto) ProtoMessage() {}
+
+func (m *Integer32Proto) GetValue() int32 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Integer64Proto struct {
+ Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
+func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer64Proto) ProtoMessage() {}
+
+func (m *Integer64Proto) GetValue() int64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BoolProto struct {
+ Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BoolProto) Reset() { *m = BoolProto{} }
+func (m *BoolProto) String() string { return proto.CompactTextString(m) }
+func (*BoolProto) ProtoMessage() {}
+
+func (m *BoolProto) GetValue() bool {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return false
+}
+
+type DoubleProto struct {
+ Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DoubleProto) Reset() { *m = DoubleProto{} }
+func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
+func (*DoubleProto) ProtoMessage() {}
+
+func (m *DoubleProto) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BytesProto struct {
+ Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BytesProto) Reset() { *m = BytesProto{} }
+func (m *BytesProto) String() string { return proto.CompactTextString(m) }
+func (*BytesProto) ProtoMessage() {}
+
+func (m *BytesProto) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type VoidProto struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *VoidProto) Reset() { *m = VoidProto{} }
+func (m *VoidProto) String() string { return proto.CompactTextString(m) }
+func (*VoidProto) ProtoMessage() {}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
new file mode 100644
index 0000000..56cd7a3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto
@@ -0,0 +1,33 @@
+// Built-in base types for API calls. Primarily useful as return types.
+
+syntax = "proto2";
+option go_package = "base";
+
+package appengine.base;
+
+message StringProto {
+ required string value = 1;
+}
+
+message Integer32Proto {
+ required int32 value = 1;
+}
+
+message Integer64Proto {
+ required int64 value = 1;
+}
+
+message BoolProto {
+ required bool value = 1;
+}
+
+message DoubleProto {
+ required double value = 1;
+}
+
+message BytesProto {
+ required bytes value = 1 [ctype=CORD];
+}
+
+message VoidProto {
+}
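+
+// Illustrative use (an assumption, not upstream text): api_classic.go reads
+// the current namespace into one of these wrappers via
+// c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil), where s is a
+// *basepb.StringProto.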
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
new file mode 100644
index 0000000..8613cb7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
@@ -0,0 +1,2778 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
+// DO NOT EDIT!
+
+/*
+Package datastore is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/datastore/datastore_v3.proto
+
+It has these top-level messages:
+ Action
+ PropertyValue
+ Property
+ Path
+ Reference
+ User
+ EntityProto
+ CompositeProperty
+ Index
+ CompositeIndex
+ IndexPostfix
+ IndexPosition
+ Snapshot
+ InternalHeader
+ Transaction
+ Query
+ CompiledQuery
+ CompiledCursor
+ Cursor
+ Error
+ Cost
+ GetRequest
+ GetResponse
+ PutRequest
+ PutResponse
+ TouchRequest
+ TouchResponse
+ DeleteRequest
+ DeleteResponse
+ NextRequest
+ QueryResult
+ AllocateIdsRequest
+ AllocateIdsResponse
+ CompositeIndices
+ AddActionsRequest
+ AddActionsResponse
+ BeginTransactionRequest
+ CommitResponse
+*/
+package datastore
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type Property_Meaning int32
+
+const (
+ Property_NO_MEANING Property_Meaning = 0
+ Property_BLOB Property_Meaning = 14
+ Property_TEXT Property_Meaning = 15
+ Property_BYTESTRING Property_Meaning = 16
+ Property_ATOM_CATEGORY Property_Meaning = 1
+ Property_ATOM_LINK Property_Meaning = 2
+ Property_ATOM_TITLE Property_Meaning = 3
+ Property_ATOM_CONTENT Property_Meaning = 4
+ Property_ATOM_SUMMARY Property_Meaning = 5
+ Property_ATOM_AUTHOR Property_Meaning = 6
+ Property_GD_WHEN Property_Meaning = 7
+ Property_GD_EMAIL Property_Meaning = 8
+ Property_GEORSS_POINT Property_Meaning = 9
+ Property_GD_IM Property_Meaning = 10
+ Property_GD_PHONENUMBER Property_Meaning = 11
+ Property_GD_POSTALADDRESS Property_Meaning = 12
+ Property_GD_RATING Property_Meaning = 13
+ Property_BLOBKEY Property_Meaning = 17
+ Property_ENTITY_PROTO Property_Meaning = 19
+ Property_INDEX_VALUE Property_Meaning = 18
+)
+
+var Property_Meaning_name = map[int32]string{
+ 0: "NO_MEANING",
+ 14: "BLOB",
+ 15: "TEXT",
+ 16: "BYTESTRING",
+ 1: "ATOM_CATEGORY",
+ 2: "ATOM_LINK",
+ 3: "ATOM_TITLE",
+ 4: "ATOM_CONTENT",
+ 5: "ATOM_SUMMARY",
+ 6: "ATOM_AUTHOR",
+ 7: "GD_WHEN",
+ 8: "GD_EMAIL",
+ 9: "GEORSS_POINT",
+ 10: "GD_IM",
+ 11: "GD_PHONENUMBER",
+ 12: "GD_POSTALADDRESS",
+ 13: "GD_RATING",
+ 17: "BLOBKEY",
+ 19: "ENTITY_PROTO",
+ 18: "INDEX_VALUE",
+}
+var Property_Meaning_value = map[string]int32{
+ "NO_MEANING": 0,
+ "BLOB": 14,
+ "TEXT": 15,
+ "BYTESTRING": 16,
+ "ATOM_CATEGORY": 1,
+ "ATOM_LINK": 2,
+ "ATOM_TITLE": 3,
+ "ATOM_CONTENT": 4,
+ "ATOM_SUMMARY": 5,
+ "ATOM_AUTHOR": 6,
+ "GD_WHEN": 7,
+ "GD_EMAIL": 8,
+ "GEORSS_POINT": 9,
+ "GD_IM": 10,
+ "GD_PHONENUMBER": 11,
+ "GD_POSTALADDRESS": 12,
+ "GD_RATING": 13,
+ "BLOBKEY": 17,
+ "ENTITY_PROTO": 19,
+ "INDEX_VALUE": 18,
+}
+
+func (x Property_Meaning) Enum() *Property_Meaning {
+ p := new(Property_Meaning)
+ *p = x
+ return p
+}
+func (x Property_Meaning) String() string {
+ return proto.EnumName(Property_Meaning_name, int32(x))
+}
+func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
+ if err != nil {
+ return err
+ }
+ *x = Property_Meaning(value)
+ return nil
+}
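+
+// Illustrative note (an assumption, not upstream text): Enum() lets callers
+// take the address of a constant when filling optional enum fields, e.g.
+//
+//	p := &Property{Meaning: Property_BLOB.Enum()}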
+
+type Property_FtsTokenizationOption int32
+
+const (
+ Property_HTML Property_FtsTokenizationOption = 1
+ Property_ATOM Property_FtsTokenizationOption = 2
+)
+
+var Property_FtsTokenizationOption_name = map[int32]string{
+ 1: "HTML",
+ 2: "ATOM",
+}
+var Property_FtsTokenizationOption_value = map[string]int32{
+ "HTML": 1,
+ "ATOM": 2,
+}
+
+func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
+ p := new(Property_FtsTokenizationOption)
+ *p = x
+ return p
+}
+func (x Property_FtsTokenizationOption) String() string {
+ return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
+}
+func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
+ if err != nil {
+ return err
+ }
+ *x = Property_FtsTokenizationOption(value)
+ return nil
+}
+
+type EntityProto_Kind int32
+
+const (
+ EntityProto_GD_CONTACT EntityProto_Kind = 1
+ EntityProto_GD_EVENT EntityProto_Kind = 2
+ EntityProto_GD_MESSAGE EntityProto_Kind = 3
+)
+
+var EntityProto_Kind_name = map[int32]string{
+ 1: "GD_CONTACT",
+ 2: "GD_EVENT",
+ 3: "GD_MESSAGE",
+}
+var EntityProto_Kind_value = map[string]int32{
+ "GD_CONTACT": 1,
+ "GD_EVENT": 2,
+ "GD_MESSAGE": 3,
+}
+
+func (x EntityProto_Kind) Enum() *EntityProto_Kind {
+ p := new(EntityProto_Kind)
+ *p = x
+ return p
+}
+func (x EntityProto_Kind) String() string {
+ return proto.EnumName(EntityProto_Kind_name, int32(x))
+}
+func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
+ if err != nil {
+ return err
+ }
+ *x = EntityProto_Kind(value)
+ return nil
+}
+
+type Index_Property_Direction int32
+
+const (
+ Index_Property_ASCENDING Index_Property_Direction = 1
+ Index_Property_DESCENDING Index_Property_Direction = 2
+)
+
+var Index_Property_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Index_Property_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Index_Property_Direction) Enum() *Index_Property_Direction {
+ p := new(Index_Property_Direction)
+ *p = x
+ return p
+}
+func (x Index_Property_Direction) String() string {
+ return proto.EnumName(Index_Property_Direction_name, int32(x))
+}
+func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Index_Property_Direction(value)
+ return nil
+}
+
+type CompositeIndex_State int32
+
+const (
+ CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
+ CompositeIndex_READ_WRITE CompositeIndex_State = 2
+ CompositeIndex_DELETED CompositeIndex_State = 3
+ CompositeIndex_ERROR CompositeIndex_State = 4
+)
+
+var CompositeIndex_State_name = map[int32]string{
+ 1: "WRITE_ONLY",
+ 2: "READ_WRITE",
+ 3: "DELETED",
+ 4: "ERROR",
+}
+var CompositeIndex_State_value = map[string]int32{
+ "WRITE_ONLY": 1,
+ "READ_WRITE": 2,
+ "DELETED": 3,
+ "ERROR": 4,
+}
+
+func (x CompositeIndex_State) Enum() *CompositeIndex_State {
+ p := new(CompositeIndex_State)
+ *p = x
+ return p
+}
+func (x CompositeIndex_State) String() string {
+ return proto.EnumName(CompositeIndex_State_name, int32(x))
+}
+func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
+ if err != nil {
+ return err
+ }
+ *x = CompositeIndex_State(value)
+ return nil
+}
+
+type Snapshot_Status int32
+
+const (
+ Snapshot_INACTIVE Snapshot_Status = 0
+ Snapshot_ACTIVE Snapshot_Status = 1
+)
+
+var Snapshot_Status_name = map[int32]string{
+ 0: "INACTIVE",
+ 1: "ACTIVE",
+}
+var Snapshot_Status_value = map[string]int32{
+ "INACTIVE": 0,
+ "ACTIVE": 1,
+}
+
+func (x Snapshot_Status) Enum() *Snapshot_Status {
+ p := new(Snapshot_Status)
+ *p = x
+ return p
+}
+func (x Snapshot_Status) String() string {
+ return proto.EnumName(Snapshot_Status_name, int32(x))
+}
+func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
+ if err != nil {
+ return err
+ }
+ *x = Snapshot_Status(value)
+ return nil
+}
+
+type Query_Hint int32
+
+const (
+ Query_ORDER_FIRST Query_Hint = 1
+ Query_ANCESTOR_FIRST Query_Hint = 2
+ Query_FILTER_FIRST Query_Hint = 3
+)
+
+var Query_Hint_name = map[int32]string{
+ 1: "ORDER_FIRST",
+ 2: "ANCESTOR_FIRST",
+ 3: "FILTER_FIRST",
+}
+var Query_Hint_value = map[string]int32{
+ "ORDER_FIRST": 1,
+ "ANCESTOR_FIRST": 2,
+ "FILTER_FIRST": 3,
+}
+
+func (x Query_Hint) Enum() *Query_Hint {
+ p := new(Query_Hint)
+ *p = x
+ return p
+}
+func (x Query_Hint) String() string {
+ return proto.EnumName(Query_Hint_name, int32(x))
+}
+func (x *Query_Hint) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
+ if err != nil {
+ return err
+ }
+ *x = Query_Hint(value)
+ return nil
+}
+
+type Query_Filter_Operator int32
+
+const (
+ Query_Filter_LESS_THAN Query_Filter_Operator = 1
+ Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2
+ Query_Filter_GREATER_THAN Query_Filter_Operator = 3
+ Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
+ Query_Filter_EQUAL Query_Filter_Operator = 5
+ Query_Filter_IN Query_Filter_Operator = 6
+ Query_Filter_EXISTS Query_Filter_Operator = 7
+)
+
+var Query_Filter_Operator_name = map[int32]string{
+ 1: "LESS_THAN",
+ 2: "LESS_THAN_OR_EQUAL",
+ 3: "GREATER_THAN",
+ 4: "GREATER_THAN_OR_EQUAL",
+ 5: "EQUAL",
+ 6: "IN",
+ 7: "EXISTS",
+}
+var Query_Filter_Operator_value = map[string]int32{
+ "LESS_THAN": 1,
+ "LESS_THAN_OR_EQUAL": 2,
+ "GREATER_THAN": 3,
+ "GREATER_THAN_OR_EQUAL": 4,
+ "EQUAL": 5,
+ "IN": 6,
+ "EXISTS": 7,
+}
+
+func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
+ p := new(Query_Filter_Operator)
+ *p = x
+ return p
+}
+func (x Query_Filter_Operator) String() string {
+ return proto.EnumName(Query_Filter_Operator_name, int32(x))
+}
+func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
+ if err != nil {
+ return err
+ }
+ *x = Query_Filter_Operator(value)
+ return nil
+}
+
+type Query_Order_Direction int32
+
+const (
+ Query_Order_ASCENDING Query_Order_Direction = 1
+ Query_Order_DESCENDING Query_Order_Direction = 2
+)
+
+var Query_Order_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Query_Order_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Query_Order_Direction) Enum() *Query_Order_Direction {
+ p := new(Query_Order_Direction)
+ *p = x
+ return p
+}
+func (x Query_Order_Direction) String() string {
+ return proto.EnumName(Query_Order_Direction_name, int32(x))
+}
+func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Query_Order_Direction(value)
+ return nil
+}
+
+type Error_ErrorCode int32
+
+const (
+ Error_BAD_REQUEST Error_ErrorCode = 1
+ Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2
+ Error_INTERNAL_ERROR Error_ErrorCode = 3
+ Error_NEED_INDEX Error_ErrorCode = 4
+ Error_TIMEOUT Error_ErrorCode = 5
+ Error_PERMISSION_DENIED Error_ErrorCode = 6
+ Error_BIGTABLE_ERROR Error_ErrorCode = 7
+ Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
+ Error_CAPABILITY_DISABLED Error_ErrorCode = 9
+ Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10
+ Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11
+)
+
+var Error_ErrorCode_name = map[int32]string{
+ 1: "BAD_REQUEST",
+ 2: "CONCURRENT_TRANSACTION",
+ 3: "INTERNAL_ERROR",
+ 4: "NEED_INDEX",
+ 5: "TIMEOUT",
+ 6: "PERMISSION_DENIED",
+ 7: "BIGTABLE_ERROR",
+ 8: "COMMITTED_BUT_STILL_APPLYING",
+ 9: "CAPABILITY_DISABLED",
+ 10: "TRY_ALTERNATE_BACKEND",
+ 11: "SAFE_TIME_TOO_OLD",
+}
+var Error_ErrorCode_value = map[string]int32{
+ "BAD_REQUEST": 1,
+ "CONCURRENT_TRANSACTION": 2,
+ "INTERNAL_ERROR": 3,
+ "NEED_INDEX": 4,
+ "TIMEOUT": 5,
+ "PERMISSION_DENIED": 6,
+ "BIGTABLE_ERROR": 7,
+ "COMMITTED_BUT_STILL_APPLYING": 8,
+ "CAPABILITY_DISABLED": 9,
+ "TRY_ALTERNATE_BACKEND": 10,
+ "SAFE_TIME_TOO_OLD": 11,
+}
+
+func (x Error_ErrorCode) Enum() *Error_ErrorCode {
+ p := new(Error_ErrorCode)
+ *p = x
+ return p
+}
+func (x Error_ErrorCode) String() string {
+ return proto.EnumName(Error_ErrorCode_name, int32(x))
+}
+func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = Error_ErrorCode(value)
+ return nil
+}
+
+type PutRequest_AutoIdPolicy int32
+
+const (
+ PutRequest_CURRENT PutRequest_AutoIdPolicy = 0
+ PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
+)
+
+var PutRequest_AutoIdPolicy_name = map[int32]string{
+ 0: "CURRENT",
+ 1: "SEQUENTIAL",
+}
+var PutRequest_AutoIdPolicy_value = map[string]int32{
+ "CURRENT": 0,
+ "SEQUENTIAL": 1,
+}
+
+func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
+ p := new(PutRequest_AutoIdPolicy)
+ *p = x
+ return p
+}
+func (x PutRequest_AutoIdPolicy) String() string {
+ return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
+}
+func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
+ if err != nil {
+ return err
+ }
+ *x = PutRequest_AutoIdPolicy(value)
+ return nil
+}
+
+type Action struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Action) Reset() { *m = Action{} }
+func (m *Action) String() string { return proto.CompactTextString(m) }
+func (*Action) ProtoMessage() {}
+
+type PropertyValue struct {
+ Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
+ BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
+ DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
+ Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"`
+ Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"`
+ Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue) Reset() { *m = PropertyValue{} }
+func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue) ProtoMessage() {}
+
+func (m *PropertyValue) GetInt64Value() int64 {
+ if m != nil && m.Int64Value != nil {
+ return *m.Int64Value
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetBooleanValue() bool {
+ if m != nil && m.BooleanValue != nil {
+ return *m.BooleanValue
+ }
+ return false
+}
+
+func (m *PropertyValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+func (m *PropertyValue) GetDoubleValue() float64 {
+ if m != nil && m.DoubleValue != nil {
+ return *m.DoubleValue
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
+ if m != nil {
+ return m.Pointvalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
+ if m != nil {
+ return m.Uservalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
+ if m != nil {
+ return m.Referencevalue
+ }
+ return nil
+}
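+
+// Illustrative note (an assumption, not upstream text): the generated getters
+// are nil-safe, so (*PropertyValue)(nil).GetStringValue() returns "" rather
+// than panicking; the same guard pattern repeats for every message below.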
+
+type PropertyValue_PointValue struct {
+ X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
+ Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
+func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_PointValue) ProtoMessage() {}
+
+func (m *PropertyValue_PointValue) GetX() float64 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+func (m *PropertyValue_PointValue) GetY() float64 {
+ if m != nil && m.Y != nil {
+ return *m.Y
+ }
+ return 0
+}
+
+type PropertyValue_UserValue struct {
+ Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
+func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_UserValue) ProtoMessage() {}
+
+func (m *PropertyValue_UserValue) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type PropertyValue_ReferenceValue struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+ Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
+func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue) ProtoMessage() {}
+
+func (m *PropertyValue_ReferenceValue) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
+ if m != nil {
+ return m.Pathelement
+ }
+ return nil
+}
+
+type PropertyValue_ReferenceValue_PathElement struct {
+ Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
+ *m = PropertyValue_ReferenceValue_PathElement{}
+}
+func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Property struct {
+ Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
+ MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"`
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
+ Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
+ Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
+ FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
+ Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Property) Reset() { *m = Property{} }
+func (m *Property) String() string { return proto.CompactTextString(m) }
+func (*Property) ProtoMessage() {}
+
+const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
+const Default_Property_Searchable bool = false
+const Default_Property_Locale string = "en"
+
+func (m *Property) GetMeaning() Property_Meaning {
+ if m != nil && m.Meaning != nil {
+ return *m.Meaning
+ }
+ return Default_Property_Meaning
+}
+
+func (m *Property) GetMeaningUri() string {
+ if m != nil && m.MeaningUri != nil {
+ return *m.MeaningUri
+ }
+ return ""
+}
+
+func (m *Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Property) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Property) GetMultiple() bool {
+ if m != nil && m.Multiple != nil {
+ return *m.Multiple
+ }
+ return false
+}
+
+func (m *Property) GetSearchable() bool {
+ if m != nil && m.Searchable != nil {
+ return *m.Searchable
+ }
+ return Default_Property_Searchable
+}
+
+func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
+ if m != nil && m.FtsTokenizationOption != nil {
+ return *m.FtsTokenizationOption
+ }
+ return Property_HTML
+}
+
+func (m *Property) GetLocale() string {
+ if m != nil && m.Locale != nil {
+ return *m.Locale
+ }
+ return Default_Property_Locale
+}
+
+type Path struct {
+ Element []*Path_Element `protobuf:"group,1,rep,name=Element" json:"element,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Path) Reset() { *m = Path{} }
+func (m *Path) String() string { return proto.CompactTextString(m) }
+func (*Path) ProtoMessage() {}
+
+func (m *Path) GetElement() []*Path_Element {
+ if m != nil {
+ return m.Element
+ }
+ return nil
+}
+
+type Path_Element struct {
+ Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Path_Element) Reset() { *m = Path_Element{} }
+func (m *Path_Element) String() string { return proto.CompactTextString(m) }
+func (*Path_Element) ProtoMessage() {}
+
+func (m *Path_Element) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *Path_Element) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *Path_Element) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Reference struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+ Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Reference) Reset() { *m = Reference{} }
+func (m *Reference) String() string { return proto.CompactTextString(m) }
+func (*Reference) ProtoMessage() {}
+
+func (m *Reference) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Reference) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Reference) GetPath() *Path {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+type User struct {
+ Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *User) Reset() { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage() {}
+
+func (m *User) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *User) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *User) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *User) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *User) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type EntityProto struct {
+ Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
+ EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"`
+ Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
+ Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
+ KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"`
+ Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EntityProto) Reset() { *m = EntityProto{} }
+func (m *EntityProto) String() string { return proto.CompactTextString(m) }
+func (*EntityProto) ProtoMessage() {}
+
+func (m *EntityProto) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *EntityProto) GetEntityGroup() *Path {
+ if m != nil {
+ return m.EntityGroup
+ }
+ return nil
+}
+
+func (m *EntityProto) GetOwner() *User {
+ if m != nil {
+ return m.Owner
+ }
+ return nil
+}
+
+func (m *EntityProto) GetKind() EntityProto_Kind {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return EntityProto_GD_CONTACT
+}
+
+func (m *EntityProto) GetKindUri() string {
+ if m != nil && m.KindUri != nil {
+ return *m.KindUri
+ }
+ return ""
+}
+
+func (m *EntityProto) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRawProperty() []*Property {
+ if m != nil {
+ return m.RawProperty
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRank() int32 {
+ if m != nil && m.Rank != nil {
+ return *m.Rank
+ }
+ return 0
+}
+
+type CompositeProperty struct {
+ IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"`
+ Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
+func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
+func (*CompositeProperty) ProtoMessage() {}
+
+func (m *CompositeProperty) GetIndexId() int64 {
+ if m != nil && m.IndexId != nil {
+ return *m.IndexId
+ }
+ return 0
+}
+
+func (m *CompositeProperty) GetValue() []string {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Index struct {
+ EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"`
+ Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
+ Property []*Index_Property `protobuf:"group,2,rep,name=Property" json:"property,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Index) Reset() { *m = Index{} }
+func (m *Index) String() string { return proto.CompactTextString(m) }
+func (*Index) ProtoMessage() {}
+
+func (m *Index) GetEntityType() string {
+ if m != nil && m.EntityType != nil {
+ return *m.EntityType
+ }
+ return ""
+}
+
+func (m *Index) GetAncestor() bool {
+ if m != nil && m.Ancestor != nil {
+ return *m.Ancestor
+ }
+ return false
+}
+
+func (m *Index) GetProperty() []*Index_Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Index_Property struct {
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Index_Property) Reset() { *m = Index_Property{} }
+func (m *Index_Property) String() string { return proto.CompactTextString(m) }
+func (*Index_Property) ProtoMessage() {}
+
+const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
+
+func (m *Index_Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Index_Property) GetDirection() Index_Property_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Index_Property_Direction
+}
+
+type CompositeIndex struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
+ Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
+ State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
+ OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
+func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndex) ProtoMessage() {}
+
+const Default_CompositeIndex_OnlyUseIfRequired bool = false
+
+func (m *CompositeIndex) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *CompositeIndex) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *CompositeIndex) GetDefinition() *Index {
+ if m != nil {
+ return m.Definition
+ }
+ return nil
+}
+
+func (m *CompositeIndex) GetState() CompositeIndex_State {
+ if m != nil && m.State != nil {
+ return *m.State
+ }
+ return CompositeIndex_WRITE_ONLY
+}
+
+func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
+ if m != nil && m.OnlyUseIfRequired != nil {
+ return *m.OnlyUseIfRequired
+ }
+ return Default_CompositeIndex_OnlyUseIfRequired
+}
+
+type IndexPostfix struct {
+ IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"`
+ Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
+func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix) ProtoMessage() {}
+
+const Default_IndexPostfix_Before bool = true
+
+func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
+ if m != nil {
+ return m.IndexValue
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPostfix_Before
+}
+
+type IndexPostfix_IndexValue struct {
+ PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
+func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix_IndexValue) ProtoMessage() {}
+
+func (m *IndexPostfix_IndexValue) GetPropertyName() string {
+ if m != nil && m.PropertyName != nil {
+ return *m.PropertyName
+ }
+ return ""
+}
+
+func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type IndexPosition struct {
+ Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPosition) Reset() { *m = IndexPosition{} }
+func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
+func (*IndexPosition) ProtoMessage() {}
+
+const Default_IndexPosition_Before bool = true
+
+func (m *IndexPosition) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *IndexPosition) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPosition_Before
+}
+
+type Snapshot struct {
+ Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+
+func (m *Snapshot) GetTs() int64 {
+ if m != nil && m.Ts != nil {
+ return *m.Ts
+ }
+ return 0
+}
+
+type InternalHeader struct {
+ Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InternalHeader) Reset() { *m = InternalHeader{} }
+func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
+func (*InternalHeader) ProtoMessage() {}
+
+func (m *InternalHeader) GetQos() string {
+ if m != nil && m.Qos != nil {
+ return *m.Qos
+ }
+ return ""
+}
+
+type Transaction struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
+ App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
+ MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Transaction) Reset() { *m = Transaction{} }
+func (m *Transaction) String() string { return proto.CompactTextString(m) }
+func (*Transaction) ProtoMessage() {}
+
+const Default_Transaction_MarkChanges bool = false
+
+func (m *Transaction) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Transaction) GetHandle() uint64 {
+ if m != nil && m.Handle != nil {
+ return *m.Handle
+ }
+ return 0
+}
+
+func (m *Transaction) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Transaction) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_Transaction_MarkChanges
+}
+
+type Query struct {
+ Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"`
+ Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
+ Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter" json:"filter,omitempty"`
+ SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"`
+ Order []*Query_Order `protobuf:"group,9,rep,name=Order" json:"order,omitempty"`
+ Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
+ Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+ EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"`
+ RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"`
+ KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
+ Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
+ PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"`
+ GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"`
+ Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
+ MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"`
+ SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"`
+ PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query) Reset() { *m = Query{} }
+func (m *Query) String() string { return proto.CompactTextString(m) }
+func (*Query) ProtoMessage() {}
+
+const Default_Query_Offset int32 = 0
+const Default_Query_RequirePerfectPlan bool = false
+const Default_Query_KeysOnly bool = false
+const Default_Query_Compile bool = false
+const Default_Query_PersistOffset bool = false
+
+func (m *Query) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Query) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Query) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Query) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *Query) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+func (m *Query) GetFilter() []*Query_Filter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
+func (m *Query) GetSearchQuery() string {
+ if m != nil && m.SearchQuery != nil {
+ return *m.SearchQuery
+ }
+ return ""
+}
+
+func (m *Query) GetOrder() []*Query_Order {
+ if m != nil {
+ return m.Order
+ }
+ return nil
+}
+
+func (m *Query) GetHint() Query_Hint {
+ if m != nil && m.Hint != nil {
+ return *m.Hint
+ }
+ return Query_ORDER_FIRST
+}
+
+func (m *Query) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *Query) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_Query_Offset
+}
+
+func (m *Query) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *Query) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetEndCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.EndCompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *Query) GetRequirePerfectPlan() bool {
+ if m != nil && m.RequirePerfectPlan != nil {
+ return *m.RequirePerfectPlan
+ }
+ return Default_Query_RequirePerfectPlan
+}
+
+func (m *Query) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return Default_Query_KeysOnly
+}
+
+func (m *Query) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *Query) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_Query_Compile
+}
+
+func (m *Query) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *Query) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *Query) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetGroupByPropertyName() []string {
+ if m != nil {
+ return m.GroupByPropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return false
+}
+
+func (m *Query) GetMinSafeTimeSeconds() int64 {
+ if m != nil && m.MinSafeTimeSeconds != nil {
+ return *m.MinSafeTimeSeconds
+ }
+ return 0
+}
+
+func (m *Query) GetSafeReplicaName() []string {
+ if m != nil {
+ return m.SafeReplicaName
+ }
+ return nil
+}
+
+func (m *Query) GetPersistOffset() bool {
+ if m != nil && m.PersistOffset != nil {
+ return *m.PersistOffset
+ }
+ return Default_Query_PersistOffset
+}
+
+type Query_Filter struct {
+ Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query_Filter) Reset() { *m = Query_Filter{} }
+func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
+func (*Query_Filter) ProtoMessage() {}
+
+func (m *Query_Filter) GetOp() Query_Filter_Operator {
+ if m != nil && m.Op != nil {
+ return *m.Op
+ }
+ return Query_Filter_LESS_THAN
+}
+
+func (m *Query_Filter) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Query_Order struct {
+ Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
+ Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query_Order) Reset() { *m = Query_Order{} }
+func (m *Query_Order) String() string { return proto.CompactTextString(m) }
+func (*Query_Order) ProtoMessage() {}
+
+const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
+
+func (m *Query_Order) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *Query_Order) GetDirection() Query_Order_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Query_Order_Direction
+}
+
+type CompiledQuery struct {
+ Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"`
+ Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"`
+ IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"`
+ Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
+ KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"`
+ PropertyName []string `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"`
+ DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"`
+ Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
+func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery) ProtoMessage() {}
+
+const Default_CompiledQuery_Offset int32 = 0
+
+func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
+ if m != nil {
+ return m.Primaryscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
+ if m != nil {
+ return m.Mergejoinscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetIndexDef() *Index {
+ if m != nil {
+ return m.IndexDef
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_CompiledQuery_Offset
+}
+
+func (m *CompiledQuery) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *CompiledQuery) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetDistinctInfixSize() int32 {
+ if m != nil && m.DistinctInfixSize != nil {
+ return *m.DistinctInfixSize
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
+ if m != nil {
+ return m.Entityfilter
+ }
+ return nil
+}
+
+type CompiledQuery_PrimaryScan struct {
+ IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"`
+ StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"`
+ EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"`
+ EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"`
+ StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"`
+ EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"`
+ EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
+func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
+
+func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
+ if m != nil && m.EndKey != nil {
+ return *m.EndKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
+ if m != nil && m.EndInclusive != nil {
+ return *m.EndInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
+ if m != nil {
+ return m.StartPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
+ if m != nil {
+ return m.EndPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
+ if m != nil && m.EndUnappliedLogTimestampUs != nil {
+ return *m.EndUnappliedLogTimestampUs
+ }
+ return 0
+}
+
+type CompiledQuery_MergeJoinScan struct {
+ IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"`
+ PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"`
+ ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
+func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
+
+const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
+
+func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
+ if m != nil {
+ return m.PrefixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
+ if m != nil && m.ValuePrefix != nil {
+ return *m.ValuePrefix
+ }
+ return Default_CompiledQuery_MergeJoinScan_ValuePrefix
+}
+
+type CompiledQuery_EntityFilter struct {
+ Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
+ Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
+func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_EntityFilter) ProtoMessage() {}
+
+const Default_CompiledQuery_EntityFilter_Distinct bool = false
+
+func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return Default_CompiledQuery_EntityFilter_Distinct
+}
+
+func (m *CompiledQuery_EntityFilter) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+type CompiledCursor struct {
+ Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position" json:"position,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
+func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor) ProtoMessage() {}
+
+func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
+ if m != nil {
+ return m.Position
+ }
+ return nil
+}
+
+type CompiledCursor_Position struct {
+ StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"`
+ Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"`
+ Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
+func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position) ProtoMessage() {}
+
+const Default_CompiledCursor_Position_StartInclusive bool = true
+
+func (m *CompiledCursor_Position) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
+ if m != nil {
+ return m.Indexvalue
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return Default_CompiledCursor_Position_StartInclusive
+}
+
+type CompiledCursor_Position_IndexValue struct {
+ Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} }
+func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position_IndexValue) ProtoMessage() {}
+
+func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Cursor struct {
+ Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
+ App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cursor) Reset() { *m = Cursor{} }
+func (m *Cursor) String() string { return proto.CompactTextString(m) }
+func (*Cursor) ProtoMessage() {}
+
+func (m *Cursor) GetCursor() uint64 {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return 0
+}
+
+func (m *Cursor) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+type Error struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Error) Reset() { *m = Error{} }
+func (m *Error) String() string { return proto.CompactTextString(m) }
+func (*Error) ProtoMessage() {}
+
+type Cost struct {
+ IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"`
+ IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"`
+ EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"`
+ EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"`
+ Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"`
+ ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"`
+ IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cost) Reset() { *m = Cost{} }
+func (m *Cost) String() string { return proto.CompactTextString(m) }
+func (*Cost) ProtoMessage() {}
+
+func (m *Cost) GetIndexWrites() int32 {
+ if m != nil && m.IndexWrites != nil {
+ return *m.IndexWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetIndexWriteBytes() int32 {
+ if m != nil && m.IndexWriteBytes != nil {
+ return *m.IndexWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWrites() int32 {
+ if m != nil && m.EntityWrites != nil {
+ return *m.EntityWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWriteBytes() int32 {
+ if m != nil && m.EntityWriteBytes != nil {
+ return *m.EntityWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetCommitcost() *Cost_CommitCost {
+ if m != nil {
+ return m.Commitcost
+ }
+ return nil
+}
+
+func (m *Cost) GetApproximateStorageDelta() int32 {
+ if m != nil && m.ApproximateStorageDelta != nil {
+ return *m.ApproximateStorageDelta
+ }
+ return 0
+}
+
+func (m *Cost) GetIdSequenceUpdates() int32 {
+ if m != nil && m.IdSequenceUpdates != nil {
+ return *m.IdSequenceUpdates
+ }
+ return 0
+}
+
+type Cost_CommitCost struct {
+ RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"`
+ RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
+func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
+func (*Cost_CommitCost) ProtoMessage() {}
+
+func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
+ if m != nil && m.RequestedEntityPuts != nil {
+ return *m.RequestedEntityPuts
+ }
+ return 0
+}
+
+func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
+ if m != nil && m.RequestedEntityDeletes != nil {
+ return *m.RequestedEntityDeletes
+ }
+ return 0
+}
+
+type GetRequest struct {
+ Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
+ AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetRequest) Reset() { *m = GetRequest{} }
+func (m *GetRequest) String() string { return proto.CompactTextString(m) }
+func (*GetRequest) ProtoMessage() {}
+
+const Default_GetRequest_AllowDeferred bool = false
+
+func (m *GetRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *GetRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *GetRequest) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *GetRequest) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *GetRequest) GetAllowDeferred() bool {
+ if m != nil && m.AllowDeferred != nil {
+ return *m.AllowDeferred
+ }
+ return Default_GetRequest_AllowDeferred
+}
+
+type GetResponse struct {
+ Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity" json:"entity,omitempty"`
+ Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
+ InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetResponse) Reset() { *m = GetResponse{} }
+func (m *GetResponse) String() string { return proto.CompactTextString(m) }
+func (*GetResponse) ProtoMessage() {}
+
+const Default_GetResponse_InOrder bool = true
+
+func (m *GetResponse) GetEntity() []*GetResponse_Entity {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse) GetDeferred() []*Reference {
+ if m != nil {
+ return m.Deferred
+ }
+ return nil
+}
+
+func (m *GetResponse) GetInOrder() bool {
+ if m != nil && m.InOrder != nil {
+ return *m.InOrder
+ }
+ return Default_GetResponse_InOrder
+}
+
+type GetResponse_Entity struct {
+ Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
+ Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
+ Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
+func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
+func (*GetResponse_Entity) ProtoMessage() {}
+
+func (m *GetResponse_Entity) GetEntity() *EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+type PutRequest struct {
+ Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
+ Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PutRequest) Reset() { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage() {}
+
+const Default_PutRequest_Trusted bool = false
+const Default_PutRequest_Force bool = false
+const Default_PutRequest_MarkChanges bool = false
+const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
+
+func (m *PutRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *PutRequest) GetEntity() []*EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_PutRequest_Trusted
+}
+
+func (m *PutRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_PutRequest_Force
+}
+
+func (m *PutRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_PutRequest_MarkChanges
+}
+
+func (m *PutRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
+ if m != nil && m.AutoIdPolicy != nil {
+ return *m.AutoIdPolicy
+ }
+ return Default_PutRequest_AutoIdPolicy
+}
+
+type PutResponse struct {
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PutResponse) Reset() { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage() {}
+
+func (m *PutResponse) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *PutResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *PutResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type TouchRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"`
+ Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TouchRequest) Reset() { *m = TouchRequest{} }
+func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
+func (*TouchRequest) ProtoMessage() {}
+
+const Default_TouchRequest_Force bool = false
+
+func (m *TouchRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_TouchRequest_Force
+}
+
+func (m *TouchRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type TouchResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TouchResponse) Reset() { *m = TouchResponse{} }
+func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
+func (*TouchResponse) ProtoMessage() {}
+
+func (m *TouchResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type DeleteRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
+func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRequest) ProtoMessage() {}
+
+const Default_DeleteRequest_Trusted bool = false
+const Default_DeleteRequest_Force bool = false
+const Default_DeleteRequest_MarkChanges bool = false
+
+func (m *DeleteRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_DeleteRequest_Trusted
+}
+
+func (m *DeleteRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_DeleteRequest_Force
+}
+
+func (m *DeleteRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_DeleteRequest_MarkChanges
+}
+
+func (m *DeleteRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type DeleteResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
+func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteResponse) ProtoMessage() {}
+
+func (m *DeleteResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *DeleteResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type NextRequest struct {
+ Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
+ Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
+ Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
+ Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NextRequest) Reset() { *m = NextRequest{} }
+func (m *NextRequest) String() string { return proto.CompactTextString(m) }
+func (*NextRequest) ProtoMessage() {}
+
+const Default_NextRequest_Offset int32 = 0
+const Default_NextRequest_Compile bool = false
+
+func (m *NextRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *NextRequest) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_NextRequest_Offset
+}
+
+func (m *NextRequest) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_NextRequest_Compile
+}
+
+type QueryResult struct {
+ Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
+ Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
+ SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"`
+ MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"`
+ KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"`
+ IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"`
+ SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"`
+ CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+ Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
+ Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *QueryResult) Reset() { *m = QueryResult{} }
+func (m *QueryResult) String() string { return proto.CompactTextString(m) }
+func (*QueryResult) ProtoMessage() {}
+
+func (m *QueryResult) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetResult() []*EntityProto {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *QueryResult) GetSkippedResults() int32 {
+ if m != nil && m.SkippedResults != nil {
+ return *m.SkippedResults
+ }
+ return 0
+}
+
+func (m *QueryResult) GetMoreResults() bool {
+ if m != nil && m.MoreResults != nil {
+ return *m.MoreResults
+ }
+ return false
+}
+
+func (m *QueryResult) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetIndexOnly() bool {
+ if m != nil && m.IndexOnly != nil {
+ return *m.IndexOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetSmallOps() bool {
+ if m != nil && m.SmallOps != nil {
+ return *m.SmallOps
+ }
+ return false
+}
+
+func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
+ if m != nil {
+ return m.CompiledQuery
+ }
+ return nil
+}
+
+func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+func (m *QueryResult) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type AllocateIdsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"`
+ Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
+ Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
+ Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
+func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsRequest) ProtoMessage() {}
+
+func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetModelKey() *Reference {
+ if m != nil {
+ return m.ModelKey
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetSize() int64 {
+ if m != nil && m.Size != nil {
+ return *m.Size
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetMax() int64 {
+ if m != nil && m.Max != nil {
+ return *m.Max
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetReserve() []*Reference {
+ if m != nil {
+ return m.Reserve
+ }
+ return nil
+}
+
+type AllocateIdsResponse struct {
+ Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
+ End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
+ Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
+func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsResponse) ProtoMessage() {}
+
+func (m *AllocateIdsResponse) GetStart() int64 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetEnd() int64 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type CompositeIndices struct {
+ Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeIndices) Reset() { *m = CompositeIndices{} }
+func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndices) ProtoMessage() {}
+
+func (m *CompositeIndices) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+type AddActionsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
+ Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} }
+func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
+func (*AddActionsRequest) ProtoMessage() {}
+
+func (m *AddActionsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetAction() []*Action {
+ if m != nil {
+ return m.Action
+ }
+ return nil
+}
+
+type AddActionsResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} }
+func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
+func (*AddActionsResponse) ProtoMessage() {}
+
+type BeginTransactionRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
+func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionRequest) ProtoMessage() {}
+
+const Default_BeginTransactionRequest_AllowMultipleEg bool = false
+
+func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *BeginTransactionRequest) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
+ if m != nil && m.AllowMultipleEg != nil {
+ return *m.AllowMultipleEg
+ }
+ return Default_BeginTransactionRequest_AllowMultipleEg
+}
+
+type CommitResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CommitResponse) Reset() { *m = CommitResponse{} }
+func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse) ProtoMessage() {}
+
+func (m *CommitResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type CommitResponse_Version struct {
+ RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"`
+ Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} }
+func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse_Version) ProtoMessage() {}
+
+func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
+ if m != nil {
+ return m.RootEntityKey
+ }
+ return nil
+}
+
+func (m *CommitResponse_Version) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
new file mode 100755
index 0000000..e76f126
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto
@@ -0,0 +1,541 @@
+syntax = "proto2";
+option go_package = "datastore";
+
+package appengine;
+
+message Action{}
+
+message PropertyValue {
+ optional int64 int64Value = 1;
+ optional bool booleanValue = 2;
+ optional string stringValue = 3;
+ optional double doubleValue = 4;
+
+ optional group PointValue = 5 {
+ required double x = 6;
+ required double y = 7;
+ }
+
+ optional group UserValue = 8 {
+ required string email = 9;
+ required string auth_domain = 10;
+ optional string nickname = 11;
+ optional string federated_identity = 21;
+ optional string federated_provider = 22;
+ }
+
+ optional group ReferenceValue = 12 {
+ required string app = 13;
+ optional string name_space = 20;
+ repeated group PathElement = 14 {
+ required string type = 15;
+ optional int64 id = 16;
+ optional string name = 17;
+ }
+ }
+}
+
+message Property {
+ enum Meaning {
+ NO_MEANING = 0;
+ BLOB = 14;
+ TEXT = 15;
+ BYTESTRING = 16;
+
+ ATOM_CATEGORY = 1;
+ ATOM_LINK = 2;
+ ATOM_TITLE = 3;
+ ATOM_CONTENT = 4;
+ ATOM_SUMMARY = 5;
+ ATOM_AUTHOR = 6;
+
+ GD_WHEN = 7;
+ GD_EMAIL = 8;
+ GEORSS_POINT = 9;
+ GD_IM = 10;
+
+ GD_PHONENUMBER = 11;
+ GD_POSTALADDRESS = 12;
+
+ GD_RATING = 13;
+
+ BLOBKEY = 17;
+ ENTITY_PROTO = 19;
+
+ INDEX_VALUE = 18;
+ };
+
+ optional Meaning meaning = 1 [default = NO_MEANING];
+ optional string meaning_uri = 2;
+
+ required string name = 3;
+
+ required PropertyValue value = 5;
+
+ required bool multiple = 4;
+
+ optional bool searchable = 6 [default=false];
+
+ enum FtsTokenizationOption {
+ HTML = 1;
+ ATOM = 2;
+ }
+
+ optional FtsTokenizationOption fts_tokenization_option = 8;
+
+ optional string locale = 9 [default = "en"];
+}
+
+message Path {
+ repeated group Element = 1 {
+ required string type = 2;
+ optional int64 id = 3;
+ optional string name = 4;
+ }
+}
+
+message Reference {
+ required string app = 13;
+ optional string name_space = 20;
+ required Path path = 14;
+}
+
+message User {
+ required string email = 1;
+ required string auth_domain = 2;
+ optional string nickname = 3;
+ optional string federated_identity = 6;
+ optional string federated_provider = 7;
+}
+
+message EntityProto {
+ required Reference key = 13;
+ required Path entity_group = 16;
+ optional User owner = 17;
+
+ enum Kind {
+ GD_CONTACT = 1;
+ GD_EVENT = 2;
+ GD_MESSAGE = 3;
+ }
+ optional Kind kind = 4;
+ optional string kind_uri = 5;
+
+ repeated Property property = 14;
+ repeated Property raw_property = 15;
+
+ optional int32 rank = 18;
+}
+
+message CompositeProperty {
+ required int64 index_id = 1;
+ repeated string value = 2;
+}
+
+message Index {
+ required string entity_type = 1;
+ required bool ancestor = 5;
+ repeated group Property = 2 {
+ required string name = 3;
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+ optional Direction direction = 4 [default = ASCENDING];
+ }
+}
+
+message CompositeIndex {
+ required string app_id = 1;
+ required int64 id = 2;
+ required Index definition = 3;
+
+ enum State {
+ WRITE_ONLY = 1;
+ READ_WRITE = 2;
+ DELETED = 3;
+ ERROR = 4;
+ }
+ required State state = 4;
+
+ optional bool only_use_if_required = 6 [default = false];
+}
+
+message IndexPostfix {
+ message IndexValue {
+ required string property_name = 1;
+ required PropertyValue value = 2;
+ }
+
+ repeated IndexValue index_value = 1;
+
+ optional Reference key = 2;
+
+ optional bool before = 3 [default=true];
+}
+
+message IndexPosition {
+ optional string key = 1;
+
+ optional bool before = 2 [default=true];
+}
+
+message Snapshot {
+ enum Status {
+ INACTIVE = 0;
+ ACTIVE = 1;
+ }
+
+ required int64 ts = 1;
+}
+
+message InternalHeader {
+ optional string qos = 1;
+}
+
+message Transaction {
+ optional InternalHeader header = 4;
+ required fixed64 handle = 1;
+ required string app = 2;
+ optional bool mark_changes = 3 [default = false];
+}
+
+message Query {
+ optional InternalHeader header = 39;
+
+ required string app = 1;
+ optional string name_space = 29;
+
+ optional string kind = 3;
+ optional Reference ancestor = 17;
+
+ repeated group Filter = 4 {
+ enum Operator {
+ LESS_THAN = 1;
+ LESS_THAN_OR_EQUAL = 2;
+ GREATER_THAN = 3;
+ GREATER_THAN_OR_EQUAL = 4;
+ EQUAL = 5;
+ IN = 6;
+ EXISTS = 7;
+ }
+
+ required Operator op = 6;
+ repeated Property property = 14;
+ }
+
+ optional string search_query = 8;
+
+ repeated group Order = 9 {
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+
+ required string property = 10;
+ optional Direction direction = 11 [default = ASCENDING];
+ }
+
+ enum Hint {
+ ORDER_FIRST = 1;
+ ANCESTOR_FIRST = 2;
+ FILTER_FIRST = 3;
+ }
+ optional Hint hint = 18;
+
+ optional int32 count = 23;
+
+ optional int32 offset = 12 [default = 0];
+
+ optional int32 limit = 16;
+
+ optional CompiledCursor compiled_cursor = 30;
+ optional CompiledCursor end_compiled_cursor = 31;
+
+ repeated CompositeIndex composite_index = 19;
+
+ optional bool require_perfect_plan = 20 [default = false];
+
+ optional bool keys_only = 21 [default = false];
+
+ optional Transaction transaction = 22;
+
+ optional bool compile = 25 [default = false];
+
+ optional int64 failover_ms = 26;
+
+ optional bool strong = 32;
+
+ repeated string property_name = 33;
+
+ repeated string group_by_property_name = 34;
+
+ optional bool distinct = 24;
+
+ optional int64 min_safe_time_seconds = 35;
+
+ repeated string safe_replica_name = 36;
+
+ optional bool persist_offset = 37 [default=false];
+}
+
+message CompiledQuery {
+ required group PrimaryScan = 1 {
+ optional string index_name = 2;
+
+ optional string start_key = 3;
+ optional bool start_inclusive = 4;
+ optional string end_key = 5;
+ optional bool end_inclusive = 6;
+
+ repeated string start_postfix_value = 22;
+ repeated string end_postfix_value = 23;
+
+ optional int64 end_unapplied_log_timestamp_us = 19;
+ }
+
+ repeated group MergeJoinScan = 7 {
+ required string index_name = 8;
+
+ repeated string prefix_value = 9;
+
+ optional bool value_prefix = 20 [default=false];
+ }
+
+ optional Index index_def = 21;
+
+ optional int32 offset = 10 [default = 0];
+
+ optional int32 limit = 11;
+
+ required bool keys_only = 12;
+
+ repeated string property_name = 24;
+
+ optional int32 distinct_infix_size = 25;
+
+ optional group EntityFilter = 13 {
+ optional bool distinct = 14 [default=false];
+
+ optional string kind = 17;
+ optional Reference ancestor = 18;
+ }
+}
+
+message CompiledCursor {
+ optional group Position = 2 {
+ optional string start_key = 27;
+
+ repeated group IndexValue = 29 {
+ optional string property = 30;
+ required PropertyValue value = 31;
+ }
+
+ optional Reference key = 32;
+
+ optional bool start_inclusive = 28 [default=true];
+ }
+}
+
+message Cursor {
+ required fixed64 cursor = 1;
+
+ optional string app = 2;
+}
+
+message Error {
+ enum ErrorCode {
+ BAD_REQUEST = 1;
+ CONCURRENT_TRANSACTION = 2;
+ INTERNAL_ERROR = 3;
+ NEED_INDEX = 4;
+ TIMEOUT = 5;
+ PERMISSION_DENIED = 6;
+ BIGTABLE_ERROR = 7;
+ COMMITTED_BUT_STILL_APPLYING = 8;
+ CAPABILITY_DISABLED = 9;
+ TRY_ALTERNATE_BACKEND = 10;
+ SAFE_TIME_TOO_OLD = 11;
+ }
+}
+
+message Cost {
+ optional int32 index_writes = 1;
+ optional int32 index_write_bytes = 2;
+ optional int32 entity_writes = 3;
+ optional int32 entity_write_bytes = 4;
+ optional group CommitCost = 5 {
+ optional int32 requested_entity_puts = 6;
+ optional int32 requested_entity_deletes = 7;
+ };
+ optional int32 approximate_storage_delta = 8;
+ optional int32 id_sequence_updates = 9;
+}
+
+message GetRequest {
+ optional InternalHeader header = 6;
+
+ repeated Reference key = 1;
+ optional Transaction transaction = 2;
+
+ optional int64 failover_ms = 3;
+
+ optional bool strong = 4;
+
+ optional bool allow_deferred = 5 [default=false];
+}
+
+message GetResponse {
+ repeated group Entity = 1 {
+ optional EntityProto entity = 2;
+ optional Reference key = 4;
+
+ optional int64 version = 3;
+ }
+
+ repeated Reference deferred = 5;
+
+ optional bool in_order = 6 [default=true];
+}
+
+message PutRequest {
+ optional InternalHeader header = 11;
+
+ repeated EntityProto entity = 1;
+ optional Transaction transaction = 2;
+ repeated CompositeIndex composite_index = 3;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+
+ enum AutoIdPolicy {
+ CURRENT = 0;
+ SEQUENTIAL = 1;
+ }
+ optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
+}
+
+message PutResponse {
+ repeated Reference key = 1;
+ optional Cost cost = 2;
+ repeated int64 version = 3;
+}
+
+message TouchRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 1;
+ repeated CompositeIndex composite_index = 2;
+ optional bool force = 3 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message TouchResponse {
+ optional Cost cost = 1;
+}
+
+message DeleteRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 6;
+ optional Transaction transaction = 5;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message DeleteResponse {
+ optional Cost cost = 1;
+ repeated int64 version = 3;
+}
+
+message NextRequest {
+ optional InternalHeader header = 5;
+
+ required Cursor cursor = 1;
+ optional int32 count = 2;
+
+ optional int32 offset = 4 [default = 0];
+
+ optional bool compile = 3 [default = false];
+}
+
+message QueryResult {
+ optional Cursor cursor = 1;
+
+ repeated EntityProto result = 2;
+
+ optional int32 skipped_results = 7;
+
+ required bool more_results = 3;
+
+ optional bool keys_only = 4;
+
+ optional bool index_only = 9;
+
+ optional bool small_ops = 10;
+
+ optional CompiledQuery compiled_query = 5;
+
+ optional CompiledCursor compiled_cursor = 6;
+
+ repeated CompositeIndex index = 8;
+
+ repeated int64 version = 11;
+}
+
+message AllocateIdsRequest {
+ optional InternalHeader header = 4;
+
+ optional Reference model_key = 1;
+
+ optional int64 size = 2;
+
+ optional int64 max = 3;
+
+ repeated Reference reserve = 5;
+}
+
+message AllocateIdsResponse {
+ required int64 start = 1;
+ required int64 end = 2;
+ optional Cost cost = 3;
+}
+
+message CompositeIndices {
+ repeated CompositeIndex index = 1;
+}
+
+message AddActionsRequest {
+ optional InternalHeader header = 3;
+
+ required Transaction transaction = 1;
+ repeated Action action = 2;
+}
+
+message AddActionsResponse {
+}
+
+message BeginTransactionRequest {
+ optional InternalHeader header = 3;
+
+ required string app = 1;
+ optional bool allow_multiple_eg = 2 [default = false];
+}
+
+message CommitResponse {
+ optional Cost cost = 1;
+
+ repeated group Version = 3 {
+ required Reference root_entity_key = 4;
+ required int64 version = 5;
+ }
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go
new file mode 100644
index 0000000..d538701
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity.go
@@ -0,0 +1,14 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import netcontext "golang.org/x/net/context"
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+func AppID(c netcontext.Context) string {
+ return appID(FullyQualifiedAppID(c))
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
new file mode 100644
index 0000000..e6b9227
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -0,0 +1,27 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build appengine
+
+package internal
+
+import (
+ "appengine"
+
+ netcontext "golang.org/x/net/context"
+)
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+ return appengine.DefaultVersionHostname(fromContext(ctx))
+}
+
+func RequestID(ctx netcontext.Context) string { return appengine.RequestID(fromContext(ctx)) }
+func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() }
+func ServerSoftware() string { return appengine.ServerSoftware() }
+func ModuleName(ctx netcontext.Context) string { return appengine.ModuleName(fromContext(ctx)) }
+func VersionID(ctx netcontext.Context) string { return appengine.VersionID(fromContext(ctx)) }
+func InstanceID() string { return appengine.InstanceID() }
+func IsDevAppServer() bool { return appengine.IsDevAppServer() }
+
+func fullyQualifiedAppID(ctx netcontext.Context) string { return fromContext(ctx).FullyQualifiedAppID() }
diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go
new file mode 100644
index 0000000..ebe68b7
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_vm.go
@@ -0,0 +1,97 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "net/http"
+ "os"
+
+ netcontext "golang.org/x/net/context"
+)
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+const (
+ hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
+ hRequestLogId = "X-AppEngine-Request-Log-Id"
+ hDatacenter = "X-AppEngine-Datacenter"
+)
+
+func ctxHeaders(ctx netcontext.Context) http.Header {
+ return fromContext(ctx).Request().Header
+}
+
+func DefaultVersionHostname(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hDefaultVersionHostname)
+}
+
+func RequestID(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hRequestLogId)
+}
+
+func Datacenter(ctx netcontext.Context) string {
+ return ctxHeaders(ctx).Get(hDatacenter)
+}
+
+func ServerSoftware() string {
+ // TODO(dsymonds): Remove fallback when we've verified this.
+ if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
+ return s
+ }
+ return "Google App Engine/1.x.x"
+}
+
+// TODO(dsymonds): Remove the metadata fetches.
+
+func ModuleName(_ netcontext.Context) string {
+ if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_name"))
+}
+
+func VersionID(_ netcontext.Context) string {
+ if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" {
+ return s1 + "." + s2
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version"))
+}
+
+func InstanceID() string {
+ if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
+}
+
+func partitionlessAppID() string {
+ // gae_project has everything except the partition prefix.
+ appID := os.Getenv("GAE_LONG_APP_ID")
+ if appID == "" {
+ appID = string(mustGetMetadata("instance/attributes/gae_project"))
+ }
+ return appID
+}
+
+func fullyQualifiedAppID(_ netcontext.Context) string {
+ appID := partitionlessAppID()
+
+ part := os.Getenv("GAE_PARTITION")
+ if part == "" {
+ part = string(mustGetMetadata("instance/attributes/gae_partition"))
+ }
+
+ if part != "" {
+ appID = part + "~" + appID
+ }
+ return appID
+}
+
+func IsDevAppServer() bool {
+ return os.Getenv("RUN_WITH_DEVAPPSERVER") != ""
+}
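
On the flexible ("VM") runtime the identity accessors above read GAE_* environment variables and fall back to the GCE metadata server, and fullyQualifiedAppID prefixes the partition with a tilde. A minimal standalone sketch of that lookup order follows; the fallback values and the lookup helper are illustrative, not part of the vendored API.

package main

import (
    "fmt"
    "os"
)

// lookup returns the environment value when set, otherwise the result of
// the fallback — mirroring the env-var / metadata-server order used above.
func lookup(envKey string, fallback func() string) string {
    if s := os.Getenv(envKey); s != "" {
        return s
    }
    return fallback()
}

func main() {
    appID := lookup("GAE_LONG_APP_ID", func() string { return "demo-app" }) // fallback value is made up
    part := lookup("GAE_PARTITION", func() string { return "s" })           // ditto
    if part != "" {
        appID = part + "~" + appID // fully-qualified form, e.g. "s~demo-app"
    }
    fmt.Println(appID)
}
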
diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go
new file mode 100644
index 0000000..66e8d76
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/internal.go
@@ -0,0 +1,144 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package internal provides support for package appengine.
+//
+// Programs should not use this package directly. Its API is not stable.
+// Use packages appengine and appengine/* instead.
+package internal
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "net/url"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+// errorCodeMaps is a map of service name to the error code map for the service.
+var errorCodeMaps = make(map[string]map[int32]string)
+
+// RegisterErrorCodeMap is called from API implementations to register their
+// error code map. This should only be called from init functions.
+func RegisterErrorCodeMap(service string, m map[int32]string) {
+ errorCodeMaps[service] = m
+}
+
+type timeoutCodeKey struct {
+ service string
+ code int32
+}
+
+// timeoutCodes is the set of service+code pairs that represent timeouts.
+var timeoutCodes = make(map[timeoutCodeKey]bool)
+
+func RegisterTimeoutErrorCode(service string, code int32) {
+ timeoutCodes[timeoutCodeKey{service, code}] = true
+}
+
+// APIError is the type returned by appengine.Context's Call method
+// when an API call fails in an API-specific way. This may be, for instance,
+// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
+type APIError struct {
+ Service string
+ Detail string
+ Code int32 // API-specific error code
+}
+
+func (e *APIError) Error() string {
+ if e.Code == 0 {
+ if e.Detail == "" {
+ return "APIError <empty>"
+ }
+ return e.Detail
+ }
+ s := fmt.Sprintf("API error %d", e.Code)
+ if m, ok := errorCodeMaps[e.Service]; ok {
+ s += " (" + e.Service + ": " + m[e.Code] + ")"
+ } else {
+ // Shouldn't happen, but provide a bit more detail if it does.
+ s = e.Service + " " + s
+ }
+ if e.Detail != "" {
+ s += ": " + e.Detail
+ }
+ return s
+}
+
+func (e *APIError) IsTimeout() bool {
+ return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
+}
+
+// CallError is the type returned by appengine.Context's Call method when an
+// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
+type CallError struct {
+ Detail string
+ Code int32
+ // TODO: Remove this if we get a distinguishable error code.
+ Timeout bool
+}
+
+func (e *CallError) Error() string {
+ var msg string
+ switch remotepb.RpcError_ErrorCode(e.Code) {
+ case remotepb.RpcError_UNKNOWN:
+ return e.Detail
+ case remotepb.RpcError_OVER_QUOTA:
+ msg = "Over quota"
+ case remotepb.RpcError_CAPABILITY_DISABLED:
+ msg = "Capability disabled"
+ case remotepb.RpcError_CANCELLED:
+ msg = "Canceled"
+ default:
+ msg = fmt.Sprintf("Call error %d", e.Code)
+ }
+ s := msg + ": " + e.Detail
+ if e.Timeout {
+ s += " (timeout)"
+ }
+ return s
+}
+
+func (e *CallError) IsTimeout() bool {
+ return e.Timeout
+}
+
+func Main() {
+ installHealthChecker(http.DefaultServeMux)
+
+ port := "8080"
+ if s := os.Getenv("PORT"); s != "" {
+ port = s
+ }
+
+ if err := http.ListenAndServe(":"+port, http.HandlerFunc(handleHTTP)); err != nil {
+ log.Fatalf("http.ListenAndServe: %v", err)
+ }
+}
+
+func installHealthChecker(mux *http.ServeMux) {
+ // If no health check handler has been installed by this point, add a trivial one.
+ const healthPath = "/_ah/health"
+ hreq := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Path: healthPath,
+ },
+ }
+ if _, pat := mux.Handler(hreq); pat != healthPath {
+ mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "ok")
+ })
+ }
+}
+
+// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
+// The function should be prepared to be called on the same message more than once; it should only modify the
+// RPC request the first time.
+var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
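
installHealthChecker is careful not to clobber an existing handler: it probes the mux with a synthetic GET request and only registers the trivial responder when nothing more specific owns /_ah/health. The same probe works against any *http.ServeMux; a small self-contained sketch:

package main

import (
    "io"
    "log"
    "net/http"
    "net/url"
)

// ensureHealth registers a trivial health handler unless one is already bound.
func ensureHealth(mux *http.ServeMux) {
    const path = "/_ah/health"
    probe := &http.Request{Method: "GET", URL: &url.URL{Path: path}}
    // mux.Handler reports the pattern that would serve the request; an exact
    // match means a health handler is already installed.
    if _, pattern := mux.Handler(probe); pattern != path {
        mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
            io.WriteString(w, "ok")
        })
    }
}

func main() {
    mux := http.NewServeMux()
    ensureHealth(mux) // registers the trivial handler
    ensureHealth(mux) // no-op: the pattern now matches exactly
    log.Fatal(http.ListenAndServe(":8080", mux))
}
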
diff --git a/vendor/google.golang.org/appengine/internal/log_vm.go b/vendor/google.golang.org/appengine/internal/log_vm.go
new file mode 100644
index 0000000..1e7c9f2
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log_vm.go
@@ -0,0 +1,109 @@
+// Copyright 2016 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+package internal
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+ "sync"
+ "time"
+)
+
+// jsonLogger writes logs in the JSON format required for Flex Logging. It can
+// be used concurrently.
+type jsonLogger struct {
+ mu sync.Mutex
+ enc *json.Encoder
+}
+
+type logLine struct {
+ Message string `json:"message"`
+ Timestamp logTimestamp `json:"timestamp"`
+ Severity string `json:"severity"`
+ TraceID string `json:"traceId,omitempty"`
+}
+
+type logTimestamp struct {
+ Seconds int64 `json:"seconds"`
+ Nanos int `json:"nanos"`
+}
+
+var (
+ logger *jsonLogger
+ loggerOnce sync.Once
+
+ logPath = "/var/log/app_engine/app.json"
+ stderrLogger = newJSONLogger(os.Stderr)
+ testLogger = newJSONLogger(ioutil.Discard)
+
+ levels = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+ }
+)
+
+func globalLogger() *jsonLogger {
+ loggerOnce.Do(func() {
+ f, err := os.OpenFile(logPath, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
+ if err != nil {
+ log.Printf("failed to open/create log file, logging to stderr: %v", err)
+ logger = stderrLogger
+ return
+ }
+
+ logger = newJSONLogger(f)
+ })
+
+ return logger
+}
+
+func logf(ctx *context, level int64, format string, args ...interface{}) {
+ s := strings.TrimSpace(fmt.Sprintf(format, args...))
+ now := time.Now()
+
+ trace := ctx.req.Header.Get(traceHeader)
+ if i := strings.Index(trace, "/"); i > -1 {
+ trace = trace[:i]
+ }
+
+ line := &logLine{
+ Message: s,
+ Timestamp: logTimestamp{
+ Seconds: now.Unix(),
+ Nanos: now.Nanosecond(),
+ },
+ Severity: levels[level],
+ TraceID: trace,
+ }
+
+ if err := ctx.logger.emit(line); err != nil {
+ log.Printf("failed to write log line to file: %v", err)
+ }
+
+ log.Print(levels[level] + ": " + s)
+}
+
+func newJSONLogger(w io.Writer) *jsonLogger {
+ return &jsonLogger{
+ enc: json.NewEncoder(w),
+ }
+}
+
+func (l *jsonLogger) emit(line *logLine) error {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ return l.enc.Encode(line)
+}
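
Each record the logger above emits is a single JSON object per line, which is what the Flex logging agent tails from /var/log/app_engine/app.json. A standalone sketch producing one such line on stdout (the message content is illustrative):

package main

import (
    "encoding/json"
    "log"
    "os"
    "time"
)

type timestamp struct {
    Seconds int64 `json:"seconds"`
    Nanos   int   `json:"nanos"`
}

type line struct {
    Message   string    `json:"message"`
    Timestamp timestamp `json:"timestamp"`
    Severity  string    `json:"severity"`
    TraceID   string    `json:"traceId,omitempty"`
}

func main() {
    now := time.Now()
    enc := json.NewEncoder(os.Stdout) // Encode appends the newline delimiter
    err := enc.Encode(&line{
        Message:   "request handled",
        Timestamp: timestamp{Seconds: now.Unix(), Nanos: now.Nanosecond()},
        Severity:  "INFO",
    })
    if err != nil {
        log.Fatal(err)
    }
}
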
diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go
new file mode 100644
index 0000000..9cc1f71
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/metadata.go
@@ -0,0 +1,61 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file has code for accessing metadata.
+//
+// References:
+// https://cloud.google.com/compute/docs/metadata
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+)
+
+const (
+ metadataHost = "metadata"
+ metadataPath = "/computeMetadata/v1/"
+)
+
+var (
+ metadataRequestHeaders = http.Header{
+ "Metadata-Flavor": []string{"Google"},
+ }
+)
+
+// TODO(dsymonds): Do we need to support default values, like Python?
+func mustGetMetadata(key string) []byte {
+ b, err := getMetadata(key)
+ if err != nil {
+ log.Fatalf("Metadata fetch failed: %v", err)
+ }
+ return b
+}
+
+func getMetadata(key string) ([]byte, error) {
+ // TODO(dsymonds): May need to use url.Parse to support keys with query args.
+ req := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "http",
+ Host: metadataHost,
+ Path: metadataPath + key,
+ },
+ Header: metadataRequestHeaders,
+ Host: metadataHost,
+ }
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
+ }
+ return ioutil.ReadAll(resp.Body)
+}
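
The only non-obvious parts of the metadata call are the mandatory Metadata-Flavor: Google header and the bare metadata hostname, both standard GCE conventions. An equivalent standalone fetch, using an illustrative key; it only resolves from inside GCE/App Engine:

package main

import (
    "fmt"
    "io/ioutil"
    "log"
    "net/http"
)

func main() {
    url := "http://metadata/computeMetadata/v1/instance/id" // sample key
    req, err := http.NewRequest("GET", url, nil)
    if err != nil {
        log.Fatal(err)
    }
    req.Header.Set("Metadata-Flavor", "Google") // required, or the server rejects the request
    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        log.Fatal(err) // expected off-GCE: the "metadata" host won't resolve
    }
    defer resp.Body.Close()
    b, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s\n", b)
}
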
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
new file mode 100644
index 0000000..a0145ed
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.pb.go
@@ -0,0 +1,375 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/modules/modules_service.proto
+// DO NOT EDIT!
+
+/*
+Package modules is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/modules/modules_service.proto
+
+It has these top-level messages:
+ ModulesServiceError
+ GetModulesRequest
+ GetModulesResponse
+ GetVersionsRequest
+ GetVersionsResponse
+ GetDefaultVersionRequest
+ GetDefaultVersionResponse
+ GetNumInstancesRequest
+ GetNumInstancesResponse
+ SetNumInstancesRequest
+ SetNumInstancesResponse
+ StartModuleRequest
+ StartModuleResponse
+ StopModuleRequest
+ StopModuleResponse
+ GetHostnameRequest
+ GetHostnameResponse
+*/
+package modules
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type ModulesServiceError_ErrorCode int32
+
+const (
+ ModulesServiceError_OK ModulesServiceError_ErrorCode = 0
+ ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1
+ ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2
+ ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3
+ ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4
+ ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5
+)
+
+var ModulesServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_MODULE",
+ 2: "INVALID_VERSION",
+ 3: "INVALID_INSTANCES",
+ 4: "TRANSIENT_ERROR",
+ 5: "UNEXPECTED_STATE",
+}
+var ModulesServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_MODULE": 1,
+ "INVALID_VERSION": 2,
+ "INVALID_INSTANCES": 3,
+ "TRANSIENT_ERROR": 4,
+ "UNEXPECTED_STATE": 5,
+}
+
+func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode {
+ p := new(ModulesServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ModulesServiceError_ErrorCode) String() string {
+ return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x))
+}
+func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ModulesServiceError_ErrorCode(value)
+ return nil
+}
+
+type ModulesServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} }
+func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
+func (*ModulesServiceError) ProtoMessage() {}
+
+type GetModulesRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} }
+func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetModulesRequest) ProtoMessage() {}
+
+type GetModulesResponse struct {
+ Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} }
+func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetModulesResponse) ProtoMessage() {}
+
+func (m *GetModulesResponse) GetModule() []string {
+ if m != nil {
+ return m.Module
+ }
+ return nil
+}
+
+type GetVersionsRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} }
+func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsRequest) ProtoMessage() {}
+
+func (m *GetVersionsRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+type GetVersionsResponse struct {
+ Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} }
+func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsResponse) ProtoMessage() {}
+
+func (m *GetVersionsResponse) GetVersion() []string {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type GetDefaultVersionRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} }
+func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionRequest) ProtoMessage() {}
+
+func (m *GetDefaultVersionRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+type GetDefaultVersionResponse struct {
+ Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} }
+func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionResponse) ProtoMessage() {}
+
+func (m *GetDefaultVersionResponse) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type GetNumInstancesRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} }
+func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesRequest) ProtoMessage() {}
+
+func (m *GetNumInstancesRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *GetNumInstancesRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type GetNumInstancesResponse struct {
+ Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} }
+func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesResponse) ProtoMessage() {}
+
+func (m *GetNumInstancesResponse) GetInstances() int64 {
+ if m != nil && m.Instances != nil {
+ return *m.Instances
+ }
+ return 0
+}
+
+type SetNumInstancesRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} }
+func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesRequest) ProtoMessage() {}
+
+func (m *SetNumInstancesRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *SetNumInstancesRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+func (m *SetNumInstancesRequest) GetInstances() int64 {
+ if m != nil && m.Instances != nil {
+ return *m.Instances
+ }
+ return 0
+}
+
+type SetNumInstancesResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} }
+func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesResponse) ProtoMessage() {}
+
+type StartModuleRequest struct {
+ Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} }
+func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StartModuleRequest) ProtoMessage() {}
+
+func (m *StartModuleRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *StartModuleRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type StartModuleResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} }
+func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StartModuleResponse) ProtoMessage() {}
+
+type StopModuleRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} }
+func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StopModuleRequest) ProtoMessage() {}
+
+func (m *StopModuleRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *StopModuleRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type StopModuleResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} }
+func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StopModuleResponse) ProtoMessage() {}
+
+type GetHostnameRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} }
+func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameRequest) ProtoMessage() {}
+
+func (m *GetHostnameRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *GetHostnameRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+func (m *GetHostnameRequest) GetInstance() string {
+ if m != nil && m.Instance != nil {
+ return *m.Instance
+ }
+ return ""
+}
+
+type GetHostnameResponse struct {
+ Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} }
+func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameResponse) ProtoMessage() {}
+
+func (m *GetHostnameResponse) GetHostname() string {
+ if m != nil && m.Hostname != nil {
+ return *m.Hostname
+ }
+ return ""
+}
+
+func init() {
+}
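
The generated code follows the proto2 convention throughout: optional scalar fields become pointers, and every accessor is nil-safe, so calling a getter on a nil message yields the zero value instead of panicking. The pattern in isolation (a hand-written replica, since this vendored internal package is not importable from outside the appengine tree):

package main

import "fmt"

type getVersionsRequest struct {
    Module *string // proto2 optional field → pointer
}

// GetModule mirrors the generated accessor: nil-safe, zero value when unset.
func (m *getVersionsRequest) GetModule() string {
    if m != nil && m.Module != nil {
        return *m.Module
    }
    return ""
}

func main() {
    var req *getVersionsRequest        // nil message
    fmt.Printf("%q\n", req.GetModule()) // "" — no panic
    s := "default"
    req = &getVersionsRequest{Module: &s}
    fmt.Printf("%q\n", req.GetModule()) // "default"
}
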
diff --git a/vendor/google.golang.org/appengine/internal/modules/modules_service.proto b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
new file mode 100644
index 0000000..d29f006
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/modules/modules_service.proto
@@ -0,0 +1,80 @@
+syntax = "proto2";
+option go_package = "modules";
+
+package appengine;
+
+message ModulesServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_MODULE = 1;
+ INVALID_VERSION = 2;
+ INVALID_INSTANCES = 3;
+ TRANSIENT_ERROR = 4;
+ UNEXPECTED_STATE = 5;
+ }
+}
+
+message GetModulesRequest {
+}
+
+message GetModulesResponse {
+ repeated string module = 1;
+}
+
+message GetVersionsRequest {
+ optional string module = 1;
+}
+
+message GetVersionsResponse {
+ repeated string version = 1;
+}
+
+message GetDefaultVersionRequest {
+ optional string module = 1;
+}
+
+message GetDefaultVersionResponse {
+ required string version = 1;
+}
+
+message GetNumInstancesRequest {
+ optional string module = 1;
+ optional string version = 2;
+}
+
+message GetNumInstancesResponse {
+ required int64 instances = 1;
+}
+
+message SetNumInstancesRequest {
+ optional string module = 1;
+ optional string version = 2;
+ required int64 instances = 3;
+}
+
+message SetNumInstancesResponse {}
+
+message StartModuleRequest {
+ required string module = 1;
+ required string version = 2;
+}
+
+message StartModuleResponse {}
+
+message StopModuleRequest {
+ optional string module = 1;
+ optional string version = 2;
+}
+
+message StopModuleResponse {}
+
+message GetHostnameRequest {
+ optional string module = 1;
+ optional string version = 2;
+ optional string instance = 3;
+}
+
+message GetHostnameResponse {
+ required string hostname = 1;
+}
+
diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go
new file mode 100644
index 0000000..3b94cf0
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/net.go
@@ -0,0 +1,56 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements a network dialer that limits the number of concurrent connections.
+// It is only used for API calls.
+
+import (
+ "log"
+ "net"
+ "runtime"
+ "sync"
+ "time"
+)
+
+var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
+
+func limitRelease() {
+ // non-blocking
+ select {
+ case <-limitSem:
+ default:
+ // This should not normally happen.
+ log.Print("appengine: unbalanced limitSem release!")
+ }
+}
+
+func limitDial(network, addr string) (net.Conn, error) {
+ limitSem <- 1
+
+ // Dial with a timeout in case the API host is MIA.
+ // The connection should normally be very fast.
+ conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
+ if err != nil {
+ limitRelease()
+ return nil, err
+ }
+ lc := &limitConn{Conn: conn}
+ runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
+ return lc, nil
+}
+
+type limitConn struct {
+ close sync.Once
+ net.Conn
+}
+
+func (lc *limitConn) Close() error {
+ defer lc.close.Do(func() {
+ limitRelease()
+ runtime.SetFinalizer(lc, nil)
+ })
+ return lc.Conn.Close()
+}
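
The dialer above is a classic buffered-channel semaphore, with runtime.SetFinalizer as a backstop for leaked connections. A self-contained version of the same idea wired into an http.Transport — the 100-connection cap and 500ms timeout match the values above, the finalizer backstop is omitted for brevity:

package main

import (
    "fmt"
    "net"
    "net/http"
    "sync"
    "time"
)

var sem = make(chan struct{}, 100) // at most 100 concurrent connections

func limitedDial(network, addr string) (net.Conn, error) {
    sem <- struct{}{} // acquire a slot (blocks when the limit is reached)
    conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
    if err != nil {
        <-sem // release on dial failure
        return nil, err
    }
    return &limitedConn{Conn: conn}, nil
}

type limitedConn struct {
    once sync.Once
    net.Conn
}

// Close releases the slot at most once, even if Close is called twice,
// just as limitConn does above.
func (c *limitedConn) Close() error {
    defer c.once.Do(func() { <-sem })
    return c.Conn.Close()
}

func main() {
    client := &http.Client{Transport: &http.Transport{Dial: limitedDial}}
    resp, err := client.Get("http://example.com/")
    if err == nil {
        resp.Body.Close()
    }
    fmt.Println(err)
}
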
diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh
new file mode 100755
index 0000000..2fdb546
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/regen.sh
@@ -0,0 +1,40 @@
+#!/bin/bash -e
+#
+# This script rebuilds the generated code for the protocol buffers.
+# To run this you will need protoc and goprotobuf installed;
+# see https://github.com/golang/protobuf for instructions.
+
+PKG=google.golang.org/appengine
+
+function die() {
+ echo 1>&2 "$@"
+ exit 1
+}
+
+# Sanity check that the right tools are accessible.
+for tool in go protoc protoc-gen-go; do
+ q=$(which $tool) || die "didn't find $tool"
+ echo 1>&2 "$tool: $q"
+done
+
+echo -n 1>&2 "finding package dir... "
+pkgdir=$(go list -f '{{.Dir}}' $PKG)
+echo 1>&2 $pkgdir
+base=$(echo $pkgdir | sed "s,/$PKG\$,,")
+echo 1>&2 "base: $base"
+cd $base
+
+# Run protoc once per package.
+for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do
+ echo 1>&2 "* $dir"
+ protoc --go_out=. $dir/*.proto
+done
+
+for f in $(find $PKG/internal -name '*.pb.go'); do
+ # Remove proto.RegisterEnum calls.
+ # These cause duplicate registration panics when these packages
+ # are used on classic App Engine. proto.RegisterEnum only affects
+ # parsing the text format; we don't care about that.
+ # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17
+ sed -i '/proto.RegisterEnum/d' $f
+done
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
new file mode 100644
index 0000000..526bd39
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
@@ -0,0 +1,231 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
+// DO NOT EDIT!
+
+/*
+Package remote_api is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/remote_api/remote_api.proto
+
+It has these top-level messages:
+ Request
+ ApplicationError
+ RpcError
+ Response
+*/
+package remote_api
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type RpcError_ErrorCode int32
+
+const (
+ RpcError_UNKNOWN RpcError_ErrorCode = 0
+ RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
+ RpcError_PARSE_ERROR RpcError_ErrorCode = 2
+ RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
+ RpcError_OVER_QUOTA RpcError_ErrorCode = 4
+ RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
+ RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
+ RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
+ RpcError_BAD_REQUEST RpcError_ErrorCode = 8
+ RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
+ RpcError_CANCELLED RpcError_ErrorCode = 10
+ RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
+ RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
+)
+
+var RpcError_ErrorCode_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "CALL_NOT_FOUND",
+ 2: "PARSE_ERROR",
+ 3: "SECURITY_VIOLATION",
+ 4: "OVER_QUOTA",
+ 5: "REQUEST_TOO_LARGE",
+ 6: "CAPABILITY_DISABLED",
+ 7: "FEATURE_DISABLED",
+ 8: "BAD_REQUEST",
+ 9: "RESPONSE_TOO_LARGE",
+ 10: "CANCELLED",
+ 11: "REPLAY_ERROR",
+ 12: "DEADLINE_EXCEEDED",
+}
+var RpcError_ErrorCode_value = map[string]int32{
+ "UNKNOWN": 0,
+ "CALL_NOT_FOUND": 1,
+ "PARSE_ERROR": 2,
+ "SECURITY_VIOLATION": 3,
+ "OVER_QUOTA": 4,
+ "REQUEST_TOO_LARGE": 5,
+ "CAPABILITY_DISABLED": 6,
+ "FEATURE_DISABLED": 7,
+ "BAD_REQUEST": 8,
+ "RESPONSE_TOO_LARGE": 9,
+ "CANCELLED": 10,
+ "REPLAY_ERROR": 11,
+ "DEADLINE_EXCEEDED": 12,
+}
+
+func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
+ p := new(RpcError_ErrorCode)
+ *p = x
+ return p
+}
+func (x RpcError_ErrorCode) String() string {
+ return proto.EnumName(RpcError_ErrorCode_name, int32(x))
+}
+func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = RpcError_ErrorCode(value)
+ return nil
+}
+
+type Request struct {
+ ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"`
+ Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
+ Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
+ RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+
+func (m *Request) GetServiceName() string {
+ if m != nil && m.ServiceName != nil {
+ return *m.ServiceName
+ }
+ return ""
+}
+
+func (m *Request) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *Request) GetRequest() []byte {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func (m *Request) GetRequestId() string {
+ if m != nil && m.RequestId != nil {
+ return *m.RequestId
+ }
+ return ""
+}
+
+type ApplicationError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ApplicationError) Reset() { *m = ApplicationError{} }
+func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
+func (*ApplicationError) ProtoMessage() {}
+
+func (m *ApplicationError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *ApplicationError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type RpcError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RpcError) Reset() { *m = RpcError{} }
+func (m *RpcError) String() string { return proto.CompactTextString(m) }
+func (*RpcError) ProtoMessage() {}
+
+func (m *RpcError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *RpcError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type Response struct {
+ Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
+ Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
+ ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"`
+ JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"`
+ RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Response) Reset() { *m = Response{} }
+func (m *Response) String() string { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage() {}
+
+func (m *Response) GetResponse() []byte {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (m *Response) GetException() []byte {
+ if m != nil {
+ return m.Exception
+ }
+ return nil
+}
+
+func (m *Response) GetApplicationError() *ApplicationError {
+ if m != nil {
+ return m.ApplicationError
+ }
+ return nil
+}
+
+func (m *Response) GetJavaException() []byte {
+ if m != nil {
+ return m.JavaException
+ }
+ return nil
+}
+
+func (m *Response) GetRpcError() *RpcError {
+ if m != nil {
+ return m.RpcError
+ }
+ return nil
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
new file mode 100644
index 0000000..f21763a
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto
@@ -0,0 +1,44 @@
+syntax = "proto2";
+option go_package = "remote_api";
+
+package remote_api;
+
+message Request {
+ required string service_name = 2;
+ required string method = 3;
+ required bytes request = 4;
+ optional string request_id = 5;
+}
+
+message ApplicationError {
+ required int32 code = 1;
+ required string detail = 2;
+}
+
+message RpcError {
+ enum ErrorCode {
+ UNKNOWN = 0;
+ CALL_NOT_FOUND = 1;
+ PARSE_ERROR = 2;
+ SECURITY_VIOLATION = 3;
+ OVER_QUOTA = 4;
+ REQUEST_TOO_LARGE = 5;
+ CAPABILITY_DISABLED = 6;
+ FEATURE_DISABLED = 7;
+ BAD_REQUEST = 8;
+ RESPONSE_TOO_LARGE = 9;
+ CANCELLED = 10;
+ REPLAY_ERROR = 11;
+ DEADLINE_EXCEEDED = 12;
+ }
+ required int32 code = 1;
+ optional string detail = 2;
+}
+
+message Response {
+ optional bytes response = 1;
+ optional bytes exception = 2;
+ optional ApplicationError application_error = 3;
+ optional bytes java_exception = 4;
+ optional RpcError rpc_error = 5;
+}
diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go
new file mode 100644
index 0000000..28a6d18
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/transaction.go
@@ -0,0 +1,107 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements hooks for applying datastore transactions.
+
+import (
+ "errors"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+ netcontext "golang.org/x/net/context"
+
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var transactionSetters = make(map[reflect.Type]reflect.Value)
+
+// RegisterTransactionSetter registers a function that sets transaction information
+// in a protocol buffer message. f should be a function with two arguments,
+// the first being a protocol buffer type, and the second being *datastore.Transaction.
+func RegisterTransactionSetter(f interface{}) {
+ v := reflect.ValueOf(f)
+ transactionSetters[v.Type().In(0)] = v
+}
+
+// applyTransaction applies the transaction t to message pb
+// by using the relevant setter passed to RegisterTransactionSetter.
+func applyTransaction(pb proto.Message, t *pb.Transaction) {
+ v := reflect.ValueOf(pb)
+ if f, ok := transactionSetters[v.Type()]; ok {
+ f.Call([]reflect.Value{v, reflect.ValueOf(t)})
+ }
+}
+
+var transactionKey = "used for *Transaction"
+
+func transactionFromContext(ctx netcontext.Context) *transaction {
+ t, _ := ctx.Value(&transactionKey).(*transaction)
+ return t
+}
+
+func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context {
+ return netcontext.WithValue(ctx, &transactionKey, t)
+}
+
+type transaction struct {
+ transaction pb.Transaction
+ finished bool
+}
+
+var ErrConcurrentTransaction = errors.New("internal: concurrent transaction")
+
+func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool) error {
+ if transactionFromContext(c) != nil {
+ return errors.New("nested transactions are not supported")
+ }
+
+ // Begin the transaction.
+ t := &transaction{}
+ req := &pb.BeginTransactionRequest{
+ App: proto.String(FullyQualifiedAppID(c)),
+ }
+ if xg {
+ req.AllowMultipleEg = proto.Bool(true)
+ }
+ if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil {
+ return err
+ }
+
+ // Call f, rolling back the transaction if f returns a non-nil error, or panics.
+ // The panic is not recovered.
+ defer func() {
+ if t.finished {
+ return
+ }
+ t.finished = true
+ // Ignore the error return value, since we are already returning a non-nil
+ // error (or we're panicking).
+ Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{})
+ }()
+ if err := f(withTransaction(c, t)); err != nil {
+ return err
+ }
+ t.finished = true
+
+ // Commit the transaction.
+ res := &pb.CommitResponse{}
+ err := Call(c, "datastore_v3", "Commit", &t.transaction, res)
+ if ae, ok := err.(*APIError); ok {
+ /* TODO: restore this conditional
+ if appengine.IsDevAppServer() {
+ */
+ // The Python Dev AppServer raises an ApplicationError with error code 2 (which is
+ // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
+ if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
+ return ErrConcurrentTransaction
+ }
+ if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
+ return ErrConcurrentTransaction
+ }
+ }
+ return err
+}
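
Stripped of the datastore RPCs, RunTransactionOnce is the usual begin / deferred-rollback / explicit-commit skeleton; the finished flag is what stops the deferred rollback from firing after a successful commit while still letting it run when f panics. The shape in isolation, with begin/commit/rollback as stand-ins for the RPCs:

package main

import (
    "errors"
    "fmt"
)

// runOnce runs f between begin and commit, rolling back if f fails or panics.
func runOnce(begin, commit, rollback func() error, f func() error) error {
    if err := begin(); err != nil {
        return err
    }
    finished := false
    defer func() {
        if !finished {
            rollback() // error ignored: we are already returning one (or panicking)
        }
    }()
    if err := f(); err != nil {
        return err
    }
    finished = true // a failed commit is reported but, as above, not rolled back
    return commit()
}

func main() {
    err := runOnce(
        func() error { return nil }, // begin
        func() error { return nil }, // commit
        func() error { fmt.Println("rolled back"); return nil }, // rollback
        func() error { return errors.New("boom") },              // body fails
    )
    fmt.Println(err) // "boom", printed after "rolled back"
}
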
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
new file mode 100644
index 0000000..af463fb
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
@@ -0,0 +1,355 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+// DO NOT EDIT!
+
+/*
+Package urlfetch is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+
+It has these top-level messages:
+ URLFetchServiceError
+ URLFetchRequest
+ URLFetchResponse
+*/
+package urlfetch
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+type URLFetchServiceError_ErrorCode int32
+
+const (
+ URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0
+ URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1
+ URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2
+ URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3
+ URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4
+ URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5
+ URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6
+ URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7
+ URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8
+ URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
+ URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10
+ URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11
+ URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12
+)
+
+var URLFetchServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_URL",
+ 2: "FETCH_ERROR",
+ 3: "UNSPECIFIED_ERROR",
+ 4: "RESPONSE_TOO_LARGE",
+ 5: "DEADLINE_EXCEEDED",
+ 6: "SSL_CERTIFICATE_ERROR",
+ 7: "DNS_ERROR",
+ 8: "CLOSED",
+ 9: "INTERNAL_TRANSIENT_ERROR",
+ 10: "TOO_MANY_REDIRECTS",
+ 11: "MALFORMED_REPLY",
+ 12: "CONNECTION_ERROR",
+}
+var URLFetchServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_URL": 1,
+ "FETCH_ERROR": 2,
+ "UNSPECIFIED_ERROR": 3,
+ "RESPONSE_TOO_LARGE": 4,
+ "DEADLINE_EXCEEDED": 5,
+ "SSL_CERTIFICATE_ERROR": 6,
+ "DNS_ERROR": 7,
+ "CLOSED": 8,
+ "INTERNAL_TRANSIENT_ERROR": 9,
+ "TOO_MANY_REDIRECTS": 10,
+ "MALFORMED_REPLY": 11,
+ "CONNECTION_ERROR": 12,
+}
+
+func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
+ p := new(URLFetchServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x URLFetchServiceError_ErrorCode) String() string {
+ return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
+}
+func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchServiceError_ErrorCode(value)
+ return nil
+}
+
+type URLFetchRequest_RequestMethod int32
+
+const (
+ URLFetchRequest_GET URLFetchRequest_RequestMethod = 1
+ URLFetchRequest_POST URLFetchRequest_RequestMethod = 2
+ URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3
+ URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4
+ URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
+ URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6
+)
+
+var URLFetchRequest_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+ 6: "PATCH",
+}
+var URLFetchRequest_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+ "PATCH": 6,
+}
+
+func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
+ p := new(URLFetchRequest_RequestMethod)
+ *p = x
+ return p
+}
+func (x URLFetchRequest_RequestMethod) String() string {
+ return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
+}
+func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchRequest_RequestMethod(value)
+ return nil
+}
+
+type URLFetchServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
+func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
+func (*URLFetchServiceError) ProtoMessage() {}
+
+type URLFetchRequest struct {
+ Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
+ Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"`
+ Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"`
+ Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"`
+ FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"`
+ Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"`
+ MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
+func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest) ProtoMessage() {}
+
+const Default_URLFetchRequest_FollowRedirects bool = true
+const Default_URLFetchRequest_MustValidateServerCertificate bool = true
+
+func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return URLFetchRequest_GET
+}
+
+func (m *URLFetchRequest) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetPayload() []byte {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetFollowRedirects() bool {
+ if m != nil && m.FollowRedirects != nil {
+ return *m.FollowRedirects
+ }
+ return Default_URLFetchRequest_FollowRedirects
+}
+
+func (m *URLFetchRequest) GetDeadline() float64 {
+ if m != nil && m.Deadline != nil {
+ return *m.Deadline
+ }
+ return 0
+}
+
+func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
+ if m != nil && m.MustValidateServerCertificate != nil {
+ return *m.MustValidateServerCertificate
+ }
+ return Default_URLFetchRequest_MustValidateServerCertificate
+}
+
+type URLFetchRequest_Header struct {
+ Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
+func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest_Header) ProtoMessage() {}
+
+func (m *URLFetchRequest_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchRequest_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type URLFetchResponse struct {
+ Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"`
+ StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"`
+ Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header" json:"header,omitempty"`
+ ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"`
+ ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"`
+ ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"`
+ FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" json:"FinalUrl,omitempty"`
+ ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"`
+ ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"`
+ ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
+func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse) ProtoMessage() {}
+
+const Default_URLFetchResponse_ContentWasTruncated bool = false
+const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
+const Default_URLFetchResponse_ApiBytesSent int64 = 0
+const Default_URLFetchResponse_ApiBytesReceived int64 = 0
+
+func (m *URLFetchResponse) GetContent() []byte {
+ if m != nil {
+ return m.Content
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetStatusCode() int32 {
+ if m != nil && m.StatusCode != nil {
+ return *m.StatusCode
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetContentWasTruncated() bool {
+ if m != nil && m.ContentWasTruncated != nil {
+ return *m.ContentWasTruncated
+ }
+ return Default_URLFetchResponse_ContentWasTruncated
+}
+
+func (m *URLFetchResponse) GetExternalBytesSent() int64 {
+ if m != nil && m.ExternalBytesSent != nil {
+ return *m.ExternalBytesSent
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
+ if m != nil && m.ExternalBytesReceived != nil {
+ return *m.ExternalBytesReceived
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetFinalUrl() string {
+ if m != nil && m.FinalUrl != nil {
+ return *m.FinalUrl
+ }
+ return ""
+}
+
+func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
+ if m != nil && m.ApiCpuMilliseconds != nil {
+ return *m.ApiCpuMilliseconds
+ }
+ return Default_URLFetchResponse_ApiCpuMilliseconds
+}
+
+func (m *URLFetchResponse) GetApiBytesSent() int64 {
+ if m != nil && m.ApiBytesSent != nil {
+ return *m.ApiBytesSent
+ }
+ return Default_URLFetchResponse_ApiBytesSent
+}
+
+func (m *URLFetchResponse) GetApiBytesReceived() int64 {
+ if m != nil && m.ApiBytesReceived != nil {
+ return *m.ApiBytesReceived
+ }
+ return Default_URLFetchResponse_ApiBytesReceived
+}
+
+type URLFetchResponse_Header struct {
+ Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
+func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse_Header) ProtoMessage() {}
+
+func (m *URLFetchResponse_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchResponse_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+func init() {
+}
diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
new file mode 100644
index 0000000..f695edf
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "urlfetch";
+
+package appengine;
+
+message URLFetchServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_URL = 1;
+ FETCH_ERROR = 2;
+ UNSPECIFIED_ERROR = 3;
+ RESPONSE_TOO_LARGE = 4;
+ DEADLINE_EXCEEDED = 5;
+ SSL_CERTIFICATE_ERROR = 6;
+ DNS_ERROR = 7;
+ CLOSED = 8;
+ INTERNAL_TRANSIENT_ERROR = 9;
+ TOO_MANY_REDIRECTS = 10;
+ MALFORMED_REPLY = 11;
+ CONNECTION_ERROR = 12;
+ }
+}
+
+message URLFetchRequest {
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ PATCH = 6;
+ }
+ required RequestMethod Method = 1;
+ required string Url = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bytes Payload = 6 [ctype=CORD];
+
+ optional bool FollowRedirects = 7 [default=true];
+
+ optional double Deadline = 8;
+
+ optional bool MustValidateServerCertificate = 9 [default=true];
+}
+
+message URLFetchResponse {
+ optional bytes Content = 1;
+ required int32 StatusCode = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bool ContentWasTruncated = 6 [default=false];
+ optional int64 ExternalBytesSent = 7;
+ optional int64 ExternalBytesReceived = 8;
+
+ optional string FinalUrl = 9;
+
+ optional int64 ApiCpuMilliseconds = 10 [default=0];
+ optional int64 ApiBytesSent = 11 [default=0];
+ optional int64 ApiBytesReceived = 12 [default=0];
+}
diff --git a/vendor/google.golang.org/appengine/namespace.go b/vendor/google.golang.org/appengine/namespace.go
new file mode 100644
index 0000000..21860ca
--- /dev/null
+++ b/vendor/google.golang.org/appengine/namespace.go
@@ -0,0 +1,25 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "fmt"
+ "regexp"
+
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+)
+
+// Namespace returns a replacement context that operates within the given namespace.
+func Namespace(c context.Context, namespace string) (context.Context, error) {
+ if !validNamespace.MatchString(namespace) {
+ return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace)
+ }
+ return internal.NamespacedContext(c, namespace), nil
+}
+
+// validNamespace matches valid namespace names.
+var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)
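
Namespace is part of the public appengine API, so application code can scope datastore or memcache traffic per tenant. A sketch of typical handler usage; the route, tenant name, and handler body are illustrative:

package main

import (
    "fmt"
    "net/http"

    "google.golang.org/appengine"
)

func handler(w http.ResponseWriter, r *http.Request) {
    ctx := appengine.NewContext(r)
    nsCtx, err := appengine.Namespace(ctx, "tenant-a")
    if err != nil {
        http.Error(w, err.Error(), http.StatusBadRequest)
        return
    }
    // Pass nsCtx (not ctx) to datastore/memcache calls to scope them.
    _ = nsCtx
    fmt.Fprintf(w, "operating in namespace %q", "tenant-a")
}

func main() {
    http.HandleFunc("/", handler)
    appengine.Main()
}
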
diff --git a/vendor/google.golang.org/appengine/timeout.go b/vendor/google.golang.org/appengine/timeout.go
new file mode 100644
index 0000000..05642a9
--- /dev/null
+++ b/vendor/google.golang.org/appengine/timeout.go
@@ -0,0 +1,20 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import "golang.org/x/net/context"
+
+// IsTimeoutError reports whether err is a timeout error.
+func IsTimeoutError(err error) bool {
+ if err == context.DeadlineExceeded {
+ return true
+ }
+ if t, ok := err.(interface {
+ IsTimeout() bool
+ }); ok {
+ return t.IsTimeout()
+ }
+ return false
+}
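
Because IsTimeoutError checks both context.DeadlineExceeded and the IsTimeout interface that APIError and CallError implement, callers get a single predicate for every flavor of deadline failure. Minimal usage:

package main

import (
    "fmt"

    "golang.org/x/net/context"

    "google.golang.org/appengine"
)

func main() {
    // The context package's own deadline error is recognized directly...
    fmt.Println(appengine.IsTimeoutError(context.DeadlineExceeded)) // true
    // ...and so is any error implementing IsTimeout() bool, such as the
    // internal APIError/CallError types registered by the service packages.
    fmt.Println(appengine.IsTimeoutError(fmt.Errorf("other"))) // false
}
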
diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
new file mode 100644
index 0000000..6ffe1e6
--- /dev/null
+++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go
@@ -0,0 +1,210 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package urlfetch provides an http.RoundTripper implementation
+// for fetching URLs via App Engine's urlfetch service.
+package urlfetch // import "google.golang.org/appengine/urlfetch"
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/urlfetch"
+)
+
+// Transport is an implementation of http.RoundTripper for
+// App Engine. Users should generally create an http.Client using
+// this transport and use the Client rather than using this transport
+// directly.
+type Transport struct {
+ Context context.Context
+
+ // Controls whether the application checks the validity of SSL certificates
+ // over HTTPS connections. A value of false (the default) instructs the
+ // application to send a request to the server only if the certificate is
+ // valid and signed by a trusted certificate authority (CA), and also
+ // includes a hostname that matches the certificate. A value of true
+ // instructs the application to perform no certificate validation.
+ AllowInvalidServerCertificate bool
+}
+
+// Verify statically that *Transport implements http.RoundTripper.
+var _ http.RoundTripper = (*Transport)(nil)
+
+// Client returns an *http.Client using a default urlfetch Transport. This
+// client will check the validity of SSL certificates.
+//
+// Any deadline of the provided context will be used for requests through this client;
+// if the context does not have a deadline then a 5 second default is used.
+func Client(ctx context.Context) *http.Client {
+ return &http.Client{
+ Transport: &Transport{
+ Context: ctx,
+ },
+ }
+}
+
+type bodyReader struct {
+ content []byte
+ truncated bool
+ closed bool
+}
+
+// ErrTruncatedBody is the error returned after the final Read() from a
+// response's Body if the body has been truncated by App Engine's proxy.
+var ErrTruncatedBody = errors.New("urlfetch: truncated body")
+
+func statusCodeToText(code int) string {
+ if t := http.StatusText(code); t != "" {
+ return t
+ }
+ return strconv.Itoa(code)
+}
+
+func (br *bodyReader) Read(p []byte) (n int, err error) {
+ if br.closed {
+ if br.truncated {
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, br.content)
+ if n > 0 {
+ br.content = br.content[n:]
+ return
+ }
+ if br.truncated {
+ br.closed = true
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+}
+
+func (br *bodyReader) Close() error {
+ br.closed = true
+ br.content = nil
+ return nil
+}
+
+// A map of the URL Fetch-accepted methods that take a request body.
+var methodAcceptsRequestBody = map[string]bool{
+ "POST": true,
+ "PUT": true,
+ "PATCH": true,
+}
+
+// urlString returns a valid string given a URL. This function is necessary because
+// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
+// See http://code.google.com/p/go/issues/detail?id=4860.
+func urlString(u *url.URL) string {
+ if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
+ return u.String()
+ }
+ aux := *u
+ aux.Opaque = "//" + aux.Host + aux.Opaque
+ return aux.String()
+}
+
+// RoundTrip issues a single HTTP request and returns its response. Per the
+// http.RoundTripper interface, RoundTrip only returns an error if the
+// request is unsupported or the URL Fetch proxy fails.
+// Note that HTTP response codes such as 5xx, 403, 404, etc. are not
+// errors as far as the transport is concerned and will be returned
+// with err set to nil.
+func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
+ methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
+ if !ok {
+ return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
+ }
+
+ method := pb.URLFetchRequest_RequestMethod(methNum)
+
+ freq := &pb.URLFetchRequest{
+ Method: &method,
+ Url: proto.String(urlString(req.URL)),
+ FollowRedirects: proto.Bool(false), // http.Client's responsibility
+ MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
+ }
+ if deadline, ok := t.Context.Deadline(); ok {
+ freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds())
+ }
+
+ for k, vals := range req.Header {
+ for _, val := range vals {
+ freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
+ Key: proto.String(k),
+ Value: proto.String(val),
+ })
+ }
+ }
+ if methodAcceptsRequestBody[req.Method] && req.Body != nil {
+ // Avoid a []byte copy if req.Body has a Bytes method.
+ switch b := req.Body.(type) {
+ case interface {
+ Bytes() []byte
+ }:
+ freq.Payload = b.Bytes()
+ default:
+ freq.Payload, err = ioutil.ReadAll(req.Body)
+ if err != nil {
+ return nil, err
+ }
+ }
+ }
+
+ fres := &pb.URLFetchResponse{}
+ if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil {
+ return nil, err
+ }
+
+ res = &http.Response{}
+ res.StatusCode = int(*fres.StatusCode)
+ res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
+ res.Header = make(http.Header)
+ res.Request = req
+
+ // Faked:
+ res.ProtoMajor = 1
+ res.ProtoMinor = 1
+ res.Proto = "HTTP/1.1"
+ res.Close = true
+
+ for _, h := range fres.Header {
+ hkey := http.CanonicalHeaderKey(*h.Key)
+ hval := *h.Value
+ if hkey == "Content-Length" {
+ // Will get filled in below for all but HEAD requests.
+ if req.Method == "HEAD" {
+ res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
+ }
+ continue
+ }
+ res.Header.Add(hkey, hval)
+ }
+
+ if req.Method != "HEAD" {
+ res.ContentLength = int64(len(fres.Content))
+ }
+
+ truncated := fres.GetContentWasTruncated()
+ res.Body = &bodyReader{content: fres.Content, truncated: truncated}
+ return
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
+ internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
+}
diff --git a/vendor/google.golang.org/cloud/AUTHORS b/vendor/google.golang.org/cloud/AUTHORS
new file mode 100644
index 0000000..f92e5cf
--- /dev/null
+++ b/vendor/google.golang.org/cloud/AUTHORS
@@ -0,0 +1,14 @@
+# This is the official list of cloud authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as:
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+Google Inc.
+Ingo Oeser <nightlyone@googlemail.com>
+Palm Stone Games, Inc.
+Paweł Knap <pawelknap88@gmail.com>
+Péter Szilágyi <peterke@gmail.com>
+Tyler Treat <ttreat31@gmail.com>
diff --git a/vendor/google.golang.org/cloud/CONTRIBUTING.md b/vendor/google.golang.org/cloud/CONTRIBUTING.md
new file mode 100644
index 0000000..135a1a1
--- /dev/null
+++ b/vendor/google.golang.org/cloud/CONTRIBUTING.md
@@ -0,0 +1,115 @@
+# Contributing
+
+1. Sign one of the contributor license agreements below.
+1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
+1. Get the cloud package by running `go get -d google.golang.org/cloud`.
+ 1. If you have already checked out the source, make sure that the remote git
+ origin is https://code.googlesource.com/gocloud:
+
+ git remote set-url origin https://code.googlesource.com/gocloud
+1. Make sure your auth is configured correctly by visiting
+ https://code.googlesource.com, clicking "Generate Password", and following
+ the directions.
+1. Make changes and create a change by running `git codereview change <name>`,
+provide a commit message, and use `git codereview mail` to create a Gerrit CL.
+1. Keep amending the change and mail it as you receive feedback.
+
+## Integration Tests
+
+In addition to the unit tests, you may run the integration test suite.
+
+To run the integration tests, you must create and configure a project in the
+Google Developers Console. Once you have created a project, set the
+following environment variables to be able to run the tests against the actual APIs.
+
+- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
+- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
+
+Create a storage bucket with the same name as the project ID set in **GCLOUD_TESTS_GOLANG_PROJECT_ID**.
+The storage integration test will create and delete some objects in this bucket.
+
+Install the [gcloud command-line tool][gcloudcli] on your machine and use it
+to create the indexes used by the datastore integration tests, which are
+defined in `datastore/testdata/index.yaml`.
+
+From the project's root directory:
+
+``` sh
+# Set the default project in your env
+$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
+
+# Authenticate the gcloud tool with your account
+$ gcloud auth login
+
+# Create the indexes
+$ gcloud preview datastore create-indexes datastore/testdata/index.yaml
+
+```
+
+Once you've set the environment variables, you can run the integration tests by
+running:
+
+``` sh
+$ go test -v google.golang.org/cloud/...
+```
+
+## Contributor License Agreements
+
+Before we can accept your pull requests you'll need to sign a Contributor
+License Agreement (CLA):
+
+- **If you are an individual writing original source code** and **you own the
+intellectual property**, then you'll need to sign an [individual CLA][indvcla].
+- **If you work for a company that wants to allow you to contribute your work**,
+then you'll need to sign a [corporate CLA][corpcla].
+
+You can sign these electronically (just scroll to the bottom). After that,
+we'll be able to accept your pull requests.
+
+## Contributor Code of Conduct
+
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing others' private information,
+such as physical or electronic
+addresses, without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently
+applying these principles to every aspect of managing this project.
+Project maintainers who do not follow or enforce the Code of Conduct
+may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior
+may be reported by opening an issue
+or contacting one or more of the project maintainers.
+
+This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
+available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
+
+[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
+[indvcla]: https://developers.google.com/open-source/cla/individual
+[corpcla]: https://developers.google.com/open-source/cla/corporate
diff --git a/vendor/google.golang.org/cloud/CONTRIBUTORS b/vendor/google.golang.org/cloud/CONTRIBUTORS
new file mode 100644
index 0000000..27db791
--- /dev/null
+++ b/vendor/google.golang.org/cloud/CONTRIBUTORS
@@ -0,0 +1,29 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# Names should be added to this file as:
+# Name <email address>
+
+# Keep the list alphabetically sorted.
+
+Andrew Gerrand <adg@golang.org>
+Brad Fitzpatrick <bradfitz@golang.org>
+Burcu Dogan <jbd@google.com>
+Dave Day <djd@golang.org>
+David Sansome <me@davidsansome.com>
+David Symonds <dsymonds@golang.org>
+Glenn Lewis <gmlewis@google.com>
+Ingo Oeser <nightlyone@googlemail.com>
+Johan Euphrosine <proppy@google.com>
+Luna Duclos <luna.duclos@palmstonegames.com>
+Michael McGreevy <mcgreevy@golang.org>
+Omar Jarjur <ojarjur@google.com>
+Paweł Knap <pawelknap88@gmail.com>
+Péter Szilágyi <peterke@gmail.com>
+Toby Burress <kurin@google.com>
+Tyler Treat <ttreat31@gmail.com>
diff --git a/vendor/google.golang.org/cloud/README.md b/vendor/google.golang.org/cloud/README.md
new file mode 100644
index 0000000..13b7a0d
--- /dev/null
+++ b/vendor/google.golang.org/cloud/README.md
@@ -0,0 +1,186 @@
+# Google Cloud for Go
+
+[![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang)
+[![GoDoc](https://godoc.org/google.golang.org/cloud?status.svg)](https://godoc.org/google.golang.org/cloud)
+
+``` go
+import "google.golang.org/cloud"
+```
+
+**NOTE:** These packages are under development, and may occasionally make
+backwards-incompatible changes.
+
+**NOTE:** The GitHub repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
+
+Go packages for Google Cloud Platform services. Supported APIs are:
+
+Google API | Status | Package
+-------------------------------|--------------|-----------------------------------------------------------
+[Datastore][cloud-datastore] | beta | [`google.golang.org/cloud/datastore`][cloud-datastore-ref]
+[Storage][cloud-storage] | beta | [`google.golang.org/cloud/storage`][cloud-storage-ref]
+[Pub/Sub][cloud-pubsub] | experimental | [`google.golang.org/cloud/pubsub`][cloud-pubsub-ref]
+[BigTable][cloud-bigtable] | stable | [`google.golang.org/cloud/bigtable`][cloud-bigtable-ref]
+[BigQuery][cloud-bigquery] | experimental | [`google.golang.org/cloud/bigquery`][cloud-bigquery-ref]
+[Logging][cloud-logging] | experimental | [`google.golang.org/cloud/logging`][cloud-logging-ref]
+
+> **Experimental status**: the API is still being actively developed. As a
+> result, it might change in backward-incompatible ways and is not recommended
+> for production use.
+>
+> **Beta status**: the API is largely complete, but still has outstanding
+> features and bugs to be addressed. There may be minor backwards-incompatible
+> changes where necessary.
+>
+> **Stable status**: the API is mature and ready for production use. We will
+> continue addressing bugs and feature requests.
+
+Documentation and examples are available at
+https://godoc.org/google.golang.org/cloud
+
+## Authorization
+
+By default, each API will use [Google Application Default Credentials][default-creds]
+for authorization credentials used in calling the API endpoints. This will allow your
+application to run in many environments without requiring explicit configuration.
+
+Manually-configured authorization can be achieved using the
+[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to
+create an `oauth2.TokenSource`. This token source can be passed to the `NewClient`
+function for the relevant API using a
+[`cloud.WithTokenSource`](https://godoc.org/google.golang.org/cloud#WithTokenSource)
+option.
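+
+As an illustrative sketch (the key-file path, scope, and client constructor
+shown are placeholders; adapt them to the API you are using):
+
+```go
+jsonKey, err := ioutil.ReadFile("/path/to/key.json") // placeholder path
+if err != nil {
+ log.Fatal(err)
+}
+conf, err := google.JWTConfigFromJSON(jsonKey, storage.ScopeReadOnly)
+if err != nil {
+ log.Fatal(err)
+}
+client, err := storage.NewClient(ctx, cloud.WithTokenSource(conf.TokenSource(ctx)))
+if err != nil {
+ log.Fatal(err)
+}
+```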
+
+## Google Cloud Datastore
+
+[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a
+fully-managed, schemaless database for storing non-relational data. Cloud Datastore
+automatically scales with your users and supports ACID transactions, high availability
+of reads and writes, strong consistency for reads and ancestor queries, and eventual
+consistency for all other queries.
+
+Follow the [activation instructions][cloud-datastore-activation] to use the Google
+Cloud Datastore API with your project.
+
+https://godoc.org/google.golang.org/cloud/datastore
+
+First create a `datastore.Client` to use throughout your application:
+
+```go
+client, err := datastore.NewClient(ctx, "my-project-id")
+if err != nil {
+ log.Fatalln(err)
+}
+```
+
+Then use that client to interact with the API:
+
+```go
+type Post struct {
+ Title string
+ Body string `datastore:",noindex"`
+ PublishedAt time.Time
+}
+keys := []*datastore.Key{
+ datastore.NewKey(ctx, "Post", "post1", 0, nil),
+ datastore.NewKey(ctx, "Post", "post2", 0, nil),
+}
+posts := []*Post{
+ {Title: "Post 1", Body: "...", PublishedAt: time.Now()},
+ {Title: "Post 2", Body: "...", PublishedAt: time.Now()},
+}
+if _, err := client.PutMulti(ctx, keys, posts); err != nil {
+ log.Fatal(err)
+}
+```
+
+## Google Cloud Storage
+
+[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store
+data on Google infrastructure with very high reliability, performance and availability,
+and can be used to distribute large data objects to users via direct download.
+
+https://godoc.org/google.golang.org/cloud/storage
+
+First create a `storage.Client` to use throughout your application:
+
+```go
+client, err := storage.NewClient(ctx)
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+```go
+// Read object1 from the bucket.
+rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
+if err != nil {
+ log.Fatal(err)
+}
+defer rc.Close()
+body, err := ioutil.ReadAll(rc)
+if err != nil {
+ log.Fatal(err)
+}
+```
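+
+To write an object, a similar sketch (bucket and object names are placeholders)
+uses a Writer obtained from the object handle:
+
+```go
+// Write new content to object1 in bucket.
+wc := client.Bucket("bucket").Object("object1").NewWriter(ctx)
+wc.ContentType = "text/plain"
+if _, err := wc.Write([]byte("hello world")); err != nil {
+ log.Fatal(err)
+}
+// The object is not available until Close returns with no error.
+if err := wc.Close(); err != nil {
+ log.Fatal(err)
+}
+```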
+
+## Google Cloud Pub/Sub
+
+[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect
+your services with reliable, many-to-many, asynchronous messaging hosted on Google's
+infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation
+for building your own robust, global services.
+
+https://godoc.org/google.golang.org/cloud/pubsub
+
+```go
+// Publish "hello world" on topic1.
+msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{
+ Data: []byte("hello world"),
+})
+if err != nil {
+ log.Println(err)
+}
+// Pull messages via subscription1.
+msgs, err := pubsub.Pull(ctx, "subscription1", 1)
+if err != nil {
+ log.Println(err)
+}
+```
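+
+The `ctx` in these examples is assumed to carry project and auth information;
+one illustrative way to build it is `ctx := cloud.NewContext("my-project-id", hc)`
+with an authorized `hc *http.Client`.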
+
+## Contributing
+
+Contributions are welcome. Please see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md)
+document for details. We're using Gerrit for our code reviews. Please don't open pull
+requests against this repo; new pull requests will be automatically closed.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
+
+[cloud-datastore]: https://cloud.google.com/datastore/
+[cloud-datastore-ref]: https://godoc.org/google.golang.org/cloud/datastore
+[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
+[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
+
+[cloud-pubsub]: https://cloud.google.com/pubsub/
+[cloud-pubsub-ref]: https://godoc.org/google.golang.org/cloud/pubsub
+[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
+
+[cloud-storage]: https://cloud.google.com/storage/
+[cloud-storage-ref]: https://godoc.org/google.golang.org/cloud/storage
+[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview
+[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
+
+[cloud-bigtable]: https://cloud.google.com/bigtable/
+[cloud-bigtable-ref]: https://godoc.org/google.golang.org/cloud/bigtable
+
+[cloud-bigquery]: https://cloud.google.com/bigquery/
+[cloud-bigquery-ref]: https://godoc.org/google.golang.org/cloud/bigquery
+
+[cloud-logging]: https://cloud.google.com/logging/
+[cloud-logging-ref]: https://godoc.org/google.golang.org/cloud/logging
+
+[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
diff --git a/vendor/google.golang.org/cloud/cloud.go b/vendor/google.golang.org/cloud/cloud.go
new file mode 100644
index 0000000..98be1f4
--- /dev/null
+++ b/vendor/google.golang.org/cloud/cloud.go
@@ -0,0 +1,56 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cloud contains Google Cloud Platform APIs related types
+// and common functions.
+package cloud // import "google.golang.org/cloud"
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+ "google.golang.org/cloud/internal"
+)
+
+// NewContext returns a new context that uses the provided http.Client.
+// The provided http.Client is responsible for authorizing and authenticating
+// the requests made to the Google Cloud APIs.
+// It mutates the client's original Transport to append the cloud
+// package's user-agent to the outgoing requests.
+// You can obtain the project ID from the Google Developers Console,
+// https://console.developers.google.com.
+func NewContext(projID string, c *http.Client) context.Context {
+ if c == nil {
+ panic("invalid nil *http.Client passed to NewContext")
+ }
+ return WithContext(context.Background(), projID, c)
+}
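+
+// An illustrative sketch (project ID and token source are hypothetical):
+//
+//	hc := oauth2.NewClient(oauth2.NoContext, tokenSource)
+//	ctx := cloud.NewContext("my-project-id", hc)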
+
+// WithContext returns a new context in the same way NewContext does,
+// but initializes the new context with the specified parent.
+func WithContext(parent context.Context, projID string, c *http.Client) context.Context {
+ // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
+ // Do User-Agent some other way.
+ if c == nil {
+ panic("invalid nil *http.Client passed to WithContext")
+ }
+ if _, ok := c.Transport.(*internal.Transport); !ok {
+ base := c.Transport
+ if base == nil {
+ base = http.DefaultTransport
+ }
+ c.Transport = &internal.Transport{Base: base}
+ }
+ return internal.WithContext(parent, projID, c)
+}
diff --git a/vendor/google.golang.org/cloud/internal/opts/option.go b/vendor/google.golang.org/cloud/internal/opts/option.go
new file mode 100644
index 0000000..844d310
--- /dev/null
+++ b/vendor/google.golang.org/cloud/internal/opts/option.go
@@ -0,0 +1,25 @@
+// Package opts holds the DialOpt struct, configurable by
+// cloud.ClientOption values to set up transports for cloud packages.
+//
+// This is a separate package to prevent cycles between the core
+// cloud packages.
+package opts
+
+import (
+ "net/http"
+
+ "golang.org/x/oauth2"
+ "google.golang.org/grpc"
+)
+
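+// DialOpt holds the dial settings resolved from a set of cloud.ClientOption
+// values.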
+type DialOpt struct {
+ Endpoint string
+ Scopes []string
+ UserAgent string
+
+ TokenSource oauth2.TokenSource
+
+ HTTPClient *http.Client
+ GRPCClient *grpc.ClientConn
+ GRPCDialOpts []grpc.DialOption
+}
diff --git a/vendor/google.golang.org/cloud/internal/transport/dial.go b/vendor/google.golang.org/cloud/internal/transport/dial.go
new file mode 100644
index 0000000..a0f8bd9
--- /dev/null
+++ b/vendor/google.golang.org/cloud/internal/transport/dial.go
@@ -0,0 +1,101 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ "google.golang.org/cloud"
+ "google.golang.org/cloud/internal/opts"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/oauth"
+)
+
+// ErrHTTP is returned on a non-200 HTTP response.
+type ErrHTTP struct {
+ StatusCode int
+ Body []byte
+ err error
+}
+
+func (e *ErrHTTP) Error() string {
+ if e.err == nil {
+ return fmt.Sprintf("error during call, http status code: %v %s", e.StatusCode, e.Body)
+ }
+ return e.err.Error()
+}
+
+// NewHTTPClient returns an HTTP client for use in communicating with a Google
+// Cloud service, configured with the given ClientOptions. It also returns the endpoint
+// for the service as specified in the options.
+func NewHTTPClient(ctx context.Context, opt ...cloud.ClientOption) (*http.Client, string, error) {
+ var o opts.DialOpt
+ for _, option := range opt {
+ option.Resolve(&o)
+ }
+ if o.GRPCClient != nil {
+ return nil, "", errors.New("unsupported GRPC base transport specified")
+ }
+ // TODO(djd): Wrap all http.Clients with appropriate internal version to add
+ // UserAgent header and prepend correct endpoint.
+ if o.HTTPClient != nil {
+ return o.HTTPClient, o.Endpoint, nil
+ }
+ if o.TokenSource == nil {
+ var err error
+ o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
+ if err != nil {
+ return nil, "", fmt.Errorf("google.DefaultTokenSource: %v", err)
+ }
+ }
+ return oauth2.NewClient(ctx, o.TokenSource), o.Endpoint, nil
+}
+
+// DialGRPC returns a gRPC connection for use in communicating with a Google
+// Cloud service, configured with the given ClientOptions.
+func DialGRPC(ctx context.Context, opt ...cloud.ClientOption) (*grpc.ClientConn, error) {
+ var o opts.DialOpt
+ for _, option := range opt {
+ option.Resolve(&o)
+ }
+ if o.HTTPClient != nil {
+ return nil, errors.New("unsupported HTTP base transport specified")
+ }
+ if o.GRPCClient != nil {
+ return o.GRPCClient, nil
+ }
+ if o.TokenSource == nil {
+ var err error
+ o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
+ if err != nil {
+ return nil, fmt.Errorf("google.DefaultTokenSource: %v", err)
+ }
+ }
+ grpcOpts := []grpc.DialOption{
+ grpc.WithPerRPCCredentials(oauth.TokenSource{o.TokenSource}),
+ grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
+ }
+ grpcOpts = append(grpcOpts, o.GRPCDialOpts...)
+ if o.UserAgent != "" {
+ grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
+ }
+ return grpc.Dial(o.Endpoint, grpcOpts...)
+}
diff --git a/vendor/google.golang.org/cloud/key.json.enc b/vendor/google.golang.org/cloud/key.json.enc
new file mode 100644
index 0000000..2f673a8
--- /dev/null
+++ b/vendor/google.golang.org/cloud/key.json.enc
Binary files differ
diff --git a/vendor/google.golang.org/cloud/option.go b/vendor/google.golang.org/cloud/option.go
new file mode 100644
index 0000000..8a443b4
--- /dev/null
+++ b/vendor/google.golang.org/cloud/option.go
@@ -0,0 +1,114 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cloud
+
+import (
+ "net/http"
+
+ "golang.org/x/oauth2"
+ "google.golang.org/cloud/internal/opts"
+ "google.golang.org/grpc"
+)
+
+// ClientOption is used when constructing clients for each cloud service.
+type ClientOption interface {
+ // Resolve configures the given DialOpt for this option.
+ Resolve(*opts.DialOpt)
+}
+
+// WithTokenSource returns a ClientOption that specifies an OAuth2 token
+// source to be used as the basis for authentication.
+func WithTokenSource(s oauth2.TokenSource) ClientOption {
+ return withTokenSource{s}
+}
+
+type withTokenSource struct{ ts oauth2.TokenSource }
+
+func (w withTokenSource) Resolve(o *opts.DialOpt) {
+ o.TokenSource = w.ts
+}
+
+// WithEndpoint returns a ClientOption that overrides the default endpoint
+// to be used for a service.
+func WithEndpoint(url string) ClientOption {
+ return withEndpoint(url)
+}
+
+type withEndpoint string
+
+func (w withEndpoint) Resolve(o *opts.DialOpt) {
+ o.Endpoint = string(w)
+}
+
+// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
+// to be used for a service.
+func WithScopes(scope ...string) ClientOption {
+ return withScopes(scope)
+}
+
+type withScopes []string
+
+func (w withScopes) Resolve(o *opts.DialOpt) {
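+ // Copy the scopes so that later mutation of the caller's slice cannot
+ // affect the stored option.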
+ s := make([]string, len(w))
+ copy(s, w)
+ o.Scopes = s
+}
+
+// WithUserAgent returns a ClientOption that sets the User-Agent.
+func WithUserAgent(ua string) ClientOption {
+ return withUA(ua)
+}
+
+type withUA string
+
+func (w withUA) Resolve(o *opts.DialOpt) { o.UserAgent = string(w) }
+
+// WithBaseHTTP returns a ClientOption that specifies the HTTP client to
+// use as the basis of communications. This option may only be used with
+// services that support HTTP as their communication transport.
+func WithBaseHTTP(client *http.Client) ClientOption {
+ return withBaseHTTP{client}
+}
+
+type withBaseHTTP struct{ client *http.Client }
+
+func (w withBaseHTTP) Resolve(o *opts.DialOpt) {
+ o.HTTPClient = w.client
+}
+
+// WithBaseGRPC returns a ClientOption that specifies the gRPC client
+// connection to use as the basis of communications. This option may only be
+// used with services that support gRPC as their communication transport.
+func WithBaseGRPC(client *grpc.ClientConn) ClientOption {
+ return withBaseGRPC{client}
+}
+
+type withBaseGRPC struct{ client *grpc.ClientConn }
+
+func (w withBaseGRPC) Resolve(o *opts.DialOpt) {
+ o.GRPCClient = w.client
+}
+
+// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption
+// to an underlying gRPC dial. It does not work with WithBaseGRPC.
+func WithGRPCDialOption(opt grpc.DialOption) ClientOption {
+ return withGRPCDialOption{opt}
+}
+
+type withGRPCDialOption struct{ opt grpc.DialOption }
+
+func (w withGRPCDialOption) Resolve(o *opts.DialOpt) {
+ o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt)
+}
diff --git a/vendor/google.golang.org/cloud/storage/acl.go b/vendor/google.golang.org/cloud/storage/acl.go
new file mode 100644
index 0000000..1c7be32
--- /dev/null
+++ b/vendor/google.golang.org/cloud/storage/acl.go
@@ -0,0 +1,204 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "fmt"
+
+ "golang.org/x/net/context"
+ raw "google.golang.org/api/storage/v1"
+)
+
+// ACLRole is the level of access to grant.
+type ACLRole string
+
+const (
+ RoleOwner ACLRole = "OWNER"
+ RoleReader ACLRole = "READER"
+)
+
+// ACLEntity refers to a user or group.
+// They are sometimes referred to as grantees.
+//
+// It could be in the form of:
+// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
+// "domain-<domain>" and "project-team-<projectId>".
+//
+// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
+type ACLEntity string
+
+const (
+ AllUsers ACLEntity = "allUsers"
+ AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
+)
+
+// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
+type ACLRule struct {
+ Entity ACLEntity
+ Role ACLRole
+}
+
+// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
+type ACLHandle struct {
+ c *Client
+ bucket string
+ object string
+ isDefault bool
+}
+
+// Delete permanently deletes the ACL entry for the given entity.
+func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
+ if a.object != "" {
+ return a.objectDelete(ctx, entity)
+ }
+ if a.isDefault {
+ return a.bucketDefaultDelete(ctx, entity)
+ }
+ return a.bucketDelete(ctx, entity)
+}
+
+// Set sets the permission level for the given entity.
+func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
+ if a.object != "" {
+ return a.objectSet(ctx, entity, role)
+ }
+ if a.isDefault {
+ return a.bucketDefaultSet(ctx, entity, role)
+ }
+ return a.bucketSet(ctx, entity, role)
+}
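+
+// An illustrative sketch (bucket and object names hypothetical): grant all
+// users read access to an object.
+//
+//	acl := client.Bucket("my-bucket").Object("my-object").ACL()
+//	if err := acl.Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
+//		// handle the error
+//	}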
+
+// List retrieves ACL entries.
+func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) {
+ if a.object != "" {
+ return a.objectList(ctx)
+ }
+ if a.isDefault {
+ return a.bucketDefaultList(ctx)
+ }
+ return a.bucketList(ctx)
+}
+
+func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
+ acls, err := a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx).Do()
+ if err != nil {
+ return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err)
+ }
+ r := make([]ACLRule, 0, len(acls.Items))
+ for _, v := range acls.Items {
+ if m, ok := v.(map[string]interface{}); ok {
+ entity, ok1 := m["entity"].(string)
+ role, ok2 := m["role"].(string)
+ if ok1 && ok2 {
+ r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)})
+ }
+ }
+ }
+ return r, nil
+}
+
+func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
+ acl := &raw.ObjectAccessControl{
+ Bucket: a.bucket,
+ Entity: string(entity),
+ Role: string(role),
+ }
+ _, err := a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do()
+ if err != nil {
+ return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
+ }
+ return nil
+}
+
+func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
+ err := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do()
+ if err != nil {
+ return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
+ }
+ return nil
+}
+
+func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
+ acls, err := a.c.raw.BucketAccessControls.List(a.bucket).Context(ctx).Do()
+ if err != nil {
+ return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err)
+ }
+ r := make([]ACLRule, len(acls.Items))
+ for i, v := range acls.Items {
+ r[i].Entity = ACLEntity(v.Entity)
+ r[i].Role = ACLRole(v.Role)
+ }
+ return r, nil
+}
+
+func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
+ acl := &raw.BucketAccessControl{
+ Bucket: a.bucket,
+ Entity: string(entity),
+ Role: string(role),
+ }
+ _, err := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do()
+ if err != nil {
+ return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
+ }
+ return nil
+}
+
+func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
+ err := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do()
+ if err != nil {
+ return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
+ }
+ return nil
+}
+
+func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
+ acls, err := a.c.raw.ObjectAccessControls.List(a.bucket, a.object).Context(ctx).Do()
+ if err != nil {
+ return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err)
+ }
+ r := make([]ACLRule, 0, len(acls.Items))
+ for _, v := range acls.Items {
+ if m, ok := v.(map[string]interface{}); ok {
+ entity, ok1 := m["entity"].(string)
+ role, ok2 := m["role"].(string)
+ if ok1 && ok2 {
+ r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)})
+ }
+ }
+ }
+ return r, nil
+}
+
+func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
+ acl := &raw.ObjectAccessControl{
+ Bucket: a.bucket,
+ Entity: string(entity),
+ Role: string(role),
+ }
+ _, err := a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl).Context(ctx).Do()
+ if err != nil {
+ return fmt.Errorf("storage: error updating object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
+ }
+ return nil
+}
+
+func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
+ err := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)).Context(ctx).Do()
+ if err != nil {
+ return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
+ }
+ return nil
+}
diff --git a/vendor/google.golang.org/cloud/storage/reader.go b/vendor/google.golang.org/cloud/storage/reader.go
new file mode 100644
index 0000000..9e21648
--- /dev/null
+++ b/vendor/google.golang.org/cloud/storage/reader.go
@@ -0,0 +1,55 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "io"
+)
+
+// Reader reads a Cloud Storage object.
+type Reader struct {
+ body io.ReadCloser
+ remain, size int64
+ contentType string
+}
+
+func (r *Reader) Close() error {
+ return r.body.Close()
+}
+
+func (r *Reader) Read(p []byte) (int, error) {
+ n, err := r.body.Read(p)
+ if r.remain != -1 {
+ r.remain -= int64(n)
+ }
+ return n, err
+}
+
+// Size returns the size of the object in bytes.
+// The returned value is always the same and is not affected by
+// calls to Read or Close.
+func (r *Reader) Size() int64 {
+ return r.size
+}
+
+// Remain returns the number of bytes left to read, or -1 if unknown.
+func (r *Reader) Remain() int64 {
+ return r.remain
+}
+
+// ContentType returns the content type of the object.
+func (r *Reader) ContentType() string {
+ return r.contentType
+}
diff --git a/vendor/google.golang.org/cloud/storage/storage.go b/vendor/google.golang.org/cloud/storage/storage.go
new file mode 100644
index 0000000..75f0e55
--- /dev/null
+++ b/vendor/google.golang.org/cloud/storage/storage.go
@@ -0,0 +1,1026 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package storage contains a Google Cloud Storage client.
+//
+// This package is experimental and may make backwards-incompatible changes.
+package storage // import "google.golang.org/cloud/storage"
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/cloud"
+ "google.golang.org/cloud/internal/transport"
+
+ "golang.org/x/net/context"
+ "google.golang.org/api/googleapi"
+ raw "google.golang.org/api/storage/v1"
+)
+
+var (
+ ErrBucketNotExist = errors.New("storage: bucket doesn't exist")
+ ErrObjectNotExist = errors.New("storage: object doesn't exist")
+)
+
+const userAgent = "gcloud-golang-storage/20151204"
+
+const (
+ // ScopeFullControl grants permissions to manage your
+ // data and permissions in Google Cloud Storage.
+ ScopeFullControl = raw.DevstorageFullControlScope
+
+ // ScopeReadOnly grants permissions to
+ // view your data in Google Cloud Storage.
+ ScopeReadOnly = raw.DevstorageReadOnlyScope
+
+ // ScopeReadWrite grants permissions to manage your
+ // data in Google Cloud Storage.
+ ScopeReadWrite = raw.DevstorageReadWriteScope
+)
+
+// AdminClient is a client type for performing admin operations on a project's
+// buckets.
+type AdminClient struct {
+ hc *http.Client
+ raw *raw.Service
+ projectID string
+}
+
+// NewAdminClient creates a new AdminClient for a given project.
+func NewAdminClient(ctx context.Context, projectID string, opts ...cloud.ClientOption) (*AdminClient, error) {
+ c, err := NewClient(ctx, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &AdminClient{
+ hc: c.hc,
+ raw: c.raw,
+ projectID: projectID,
+ }, nil
+}
+
+// Close closes the AdminClient.
+func (c *AdminClient) Close() error {
+ c.hc = nil
+ return nil
+}
+
+// CreateBucket creates a Bucket in the project.
+// If attrs is nil the API defaults will be used.
+func (c *AdminClient) CreateBucket(ctx context.Context, bucketName string, attrs *BucketAttrs) error {
+ var bkt *raw.Bucket
+ if attrs != nil {
+ bkt = attrs.toRawBucket()
+ } else {
+ bkt = &raw.Bucket{}
+ }
+ bkt.Name = bucketName
+ req := c.raw.Buckets.Insert(c.projectID, bkt)
+ _, err := req.Context(ctx).Do()
+ return err
+}
+
+// DeleteBucket deletes a Bucket in the project.
+func (c *AdminClient) DeleteBucket(ctx context.Context, bucketName string) error {
+ req := c.raw.Buckets.Delete(bucketName)
+ return req.Context(ctx).Do()
+}
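+
+// An illustrative sketch (project and bucket names hypothetical):
+//
+//	admin, err := storage.NewAdminClient(ctx, "my-project-id")
+//	if err != nil {
+//		// handle the error
+//	}
+//	if err := admin.CreateBucket(ctx, "my-bucket", nil); err != nil {
+//		// handle the error
+//	}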
+
+// Client is a client for interacting with Google Cloud Storage.
+type Client struct {
+ hc *http.Client
+ raw *raw.Service
+}
+
+// NewClient creates a new Google Cloud Storage client.
+// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use cloud.WithScopes.
+func NewClient(ctx context.Context, opts ...cloud.ClientOption) (*Client, error) {
+ o := []cloud.ClientOption{
+ cloud.WithScopes(ScopeFullControl),
+ cloud.WithUserAgent(userAgent),
+ }
+ opts = append(o, opts...)
+ hc, _, err := transport.NewHTTPClient(ctx, opts...)
+ if err != nil {
+ return nil, fmt.Errorf("dialing: %v", err)
+ }
+ rawService, err := raw.New(hc)
+ if err != nil {
+ return nil, fmt.Errorf("storage client: %v", err)
+ }
+ return &Client{
+ hc: hc,
+ raw: rawService,
+ }, nil
+}
+
+// Close closes the Client.
+func (c *Client) Close() error {
+ c.hc = nil
+ return nil
+}
+
+// BucketHandle provides operations on a Google Cloud Storage bucket.
+// Use Client.Bucket to get a handle.
+type BucketHandle struct {
+ acl *ACLHandle
+ defaultObjectACL *ACLHandle
+
+ c *Client
+ name string
+}
+
+// Bucket returns a BucketHandle, which provides operations on the named bucket.
+// This call does not perform any network operations.
+//
+// name must contain only lowercase letters, numbers, dashes, underscores, and
+// dots. The full specification for valid bucket names can be found at:
+// https://cloud.google.com/storage/docs/bucket-naming
+func (c *Client) Bucket(name string) *BucketHandle {
+ return &BucketHandle{
+ c: c,
+ name: name,
+ acl: &ACLHandle{
+ c: c,
+ bucket: name,
+ },
+ defaultObjectACL: &ACLHandle{
+ c: c,
+ bucket: name,
+ isDefault: true,
+ },
+ }
+}
+
+// ACL returns an ACLHandle, which provides access to the bucket's access control list.
+// This controls who can list, create or overwrite the objects in a bucket.
+// This call does not perform any network operations.
+func (c *BucketHandle) ACL() *ACLHandle {
+ return c.acl
+}
+
+// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs.
+// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL.
+// This call does not perform any network operations.
+func (c *BucketHandle) DefaultObjectACL() *ACLHandle {
+ return c.defaultObjectACL
+}
+
+// Object returns an ObjectHandle, which provides operations on the named object.
+// This call does not perform any network operations.
+//
+// name must consist entirely of valid UTF-8-encoded runes. The full specification
+// for valid object names can be found at:
+// https://cloud.google.com/storage/docs/bucket-naming
+func (b *BucketHandle) Object(name string) *ObjectHandle {
+ return &ObjectHandle{
+ c: b.c,
+ bucket: b.name,
+ object: name,
+ acl: &ACLHandle{
+ c: b.c,
+ bucket: b.name,
+ object: name,
+ },
+ }
+}
+
+// TODO(jbd): Add storage.buckets.list.
+// TODO(jbd): Add storage.buckets.update.
+
+// TODO(jbd): Add storage.objects.watch.
+
+// Attrs returns the metadata for the bucket.
+func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
+ resp, err := b.c.raw.Buckets.Get(b.name).Projection("full").Context(ctx).Do()
+ if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+ return nil, ErrBucketNotExist
+ }
+ if err != nil {
+ return nil, err
+ }
+ return newBucket(resp), nil
+}
+
+// List lists objects from the bucket. You can specify a query
+// to filter the results. If q is nil, no filtering is applied.
+func (b *BucketHandle) List(ctx context.Context, q *Query) (*ObjectList, error) {
+ req := b.c.raw.Objects.List(b.name)
+ req.Projection("full")
+ if q != nil {
+ req.Delimiter(q.Delimiter)
+ req.Prefix(q.Prefix)
+ req.Versions(q.Versions)
+ req.PageToken(q.Cursor)
+ if q.MaxResults > 0 {
+ req.MaxResults(int64(q.MaxResults))
+ }
+ }
+ resp, err := req.Context(ctx).Do()
+ if err != nil {
+ return nil, err
+ }
+ objects := &ObjectList{
+ Results: make([]*ObjectAttrs, len(resp.Items)),
+ Prefixes: make([]string, len(resp.Prefixes)),
+ }
+ for i, item := range resp.Items {
+ objects.Results[i] = newObject(item)
+ }
+ for i, prefix := range resp.Prefixes {
+ objects.Prefixes[i] = prefix
+ }
+ if resp.NextPageToken != "" {
+ next := Query{}
+ if q != nil {
+ // keep the other filtering
+ // criteria if there is a query
+ next = *q
+ }
+ next.Cursor = resp.NextPageToken
+ objects.Next = &next
+ }
+ return objects, nil
+}
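+
+// An illustrative sketch (prefix hypothetical, bucket is a *BucketHandle):
+// iterate all matching objects by following the returned cursor.
+//
+//	query := &storage.Query{Prefix: "logs/"}
+//	for query != nil {
+//		objects, err := bucket.List(ctx, query)
+//		if err != nil {
+//			// handle the error
+//		}
+//		for _, obj := range objects.Results {
+//			_ = obj.Name
+//		}
+//		query = objects.Next
+//	}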
+
+// SignedURLOptions allows you to restrict the access to the signed URL.
+type SignedURLOptions struct {
+ // GoogleAccessID represents the authorizer of the signed URL generation.
+ // It is typically the Google service account client email address from
+ // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com".
+ // Required.
+ GoogleAccessID string
+
+ // PrivateKey is the Google service account private key. It is obtainable
+ // from the Google Developers Console.
+ // At https://console.developers.google.com/project/<your-project-id>/apiui/credential,
+ // create a service account client ID or reuse one of your existing service account
+ // credentials. Click on the "Generate new P12 key" to generate and download
+ // a new private key. Once you download the P12 file, use the following command
+ // to convert it into a PEM file.
+ //
+ // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
+ //
+ // Provide the contents of the PEM file as a byte slice.
+ // Required.
+ PrivateKey []byte
+
+ // Method is the HTTP method to be used with the signed URL.
+ // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests.
+ // Required.
+ Method string
+
+ // Expires is the expiration time on the signed URL. It must be
+ // a datetime in the future.
+ // Required.
+ Expires time.Time
+
+ // ContentType is the content type header the client must provide
+ // to use the generated signed URL.
+ // Optional.
+ ContentType string
+
+ // Headers is a list of extension headers the client must provide
+ // in order to use the generated signed URL.
+ // Optional.
+ Headers []string
+
+ // MD5 is the base64 encoded MD5 checksum of the file.
+ // If provided, the client should provide the exact value on the request
+ // header in order to use the signed URL.
+ // Optional.
+ MD5 []byte
+}
+
+// SignedURL returns a URL for the specified object. Signed URLs allow
+// users access to a restricted resource for a limited time without having a
+// Google account or signing in. For more information about signed
+// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.
+func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
+ if opts == nil {
+ return "", errors.New("storage: missing required SignedURLOptions")
+ }
+ if opts.GoogleAccessID == "" || opts.PrivateKey == nil {
+ return "", errors.New("storage: missing required credentials to generate a signed URL")
+ }
+ if opts.Method == "" {
+ return "", errors.New("storage: missing required method option")
+ }
+ if opts.Expires.IsZero() {
+ return "", errors.New("storage: missing required expires option")
+ }
+ key, err := parseKey(opts.PrivateKey)
+ if err != nil {
+ return "", err
+ }
+ u := &url.URL{
+ Path: fmt.Sprintf("/%s/%s", bucket, name),
+ }
+ h := sha256.New()
+ fmt.Fprintf(h, "%s\n", opts.Method)
+ fmt.Fprintf(h, "%s\n", opts.MD5)
+ fmt.Fprintf(h, "%s\n", opts.ContentType)
+ fmt.Fprintf(h, "%d\n", opts.Expires.Unix())
+ fmt.Fprintf(h, "%s", strings.Join(opts.Headers, "\n"))
+ fmt.Fprintf(h, "%s", u.String())
+ b, err := rsa.SignPKCS1v15(
+ rand.Reader,
+ key,
+ crypto.SHA256,
+ h.Sum(nil),
+ )
+ if err != nil {
+ return "", err
+ }
+ encoded := base64.StdEncoding.EncodeToString(b)
+ u.Scheme = "https"
+ u.Host = "storage.googleapis.com"
+ q := u.Query()
+ q.Set("GoogleAccessId", opts.GoogleAccessID)
+ q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix()))
+ q.Set("Signature", string(encoded))
+ u.RawQuery = q.Encode()
+ return u.String(), nil
+}
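+
+// An illustrative sketch (credentials and names hypothetical): a URL granting
+// GET access for one hour.
+//
+//	signedURL, err := storage.SignedURL("my-bucket", "my-object", &storage.SignedURLOptions{
+//		GoogleAccessID: "xxx@developer.gserviceaccount.com",
+//		PrivateKey:     pemKeyBytes,
+//		Method:         "GET",
+//		Expires:        time.Now().Add(1 * time.Hour),
+//	})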
+
+// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
+// Use BucketHandle.Object to get a handle.
+type ObjectHandle struct {
+ c *Client
+ bucket string
+ object string
+
+ acl *ACLHandle
+ conds []Condition
+}
+
+// ACL provides access to the object's access control list.
+// This controls who can read and write this object.
+// This call does not perform any network operations.
+func (o *ObjectHandle) ACL() *ACLHandle {
+ return o.acl
+}
+
+// WithConditions returns a copy of o using the provided conditions.
+func (o *ObjectHandle) WithConditions(conds ...Condition) *ObjectHandle {
+ o2 := *o
+ o2.conds = conds
+ return &o2
+}
+
+// Attrs returns meta information about the object.
+// ErrObjectNotExist will be returned if the object is not found.
+func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
+ if !utf8.ValidString(o.object) {
+ return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+ }
+ call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx)
+ if err := applyConds("Attrs", o.conds, call); err != nil {
+ return nil, err
+ }
+ obj, err := call.Do()
+ if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+ return nil, ErrObjectNotExist
+ }
+ if err != nil {
+ return nil, err
+ }
+ return newObject(obj), nil
+}
+
+// Update updates an object with the provided attributes.
+// All zero-value attributes are ignored.
+// ErrObjectNotExist will be returned if the object is not found.
+func (o *ObjectHandle) Update(ctx context.Context, attrs ObjectAttrs) (*ObjectAttrs, error) {
+ if !utf8.ValidString(o.object) {
+ return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+ }
+ call := o.c.raw.Objects.Patch(o.bucket, o.object, attrs.toRawObject(o.bucket)).Projection("full").Context(ctx)
+ if err := applyConds("Update", o.conds, call); err != nil {
+ return nil, err
+ }
+ obj, err := call.Do()
+ if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+ return nil, ErrObjectNotExist
+ }
+ if err != nil {
+ return nil, err
+ }
+ return newObject(obj), nil
+}
+
+// Delete deletes the single specified object.
+func (o *ObjectHandle) Delete(ctx context.Context) error {
+ if !utf8.ValidString(o.object) {
+ return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+ }
+ call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx)
+ if err := applyConds("Delete", o.conds, call); err != nil {
+ return err
+ }
+ err := call.Do()
+ switch e := err.(type) {
+ case nil:
+ return nil
+ case *googleapi.Error:
+ if e.Code == http.StatusNotFound {
+ return ErrObjectNotExist
+ }
+ }
+ return err
+}
+
+// CopyTo copies the object to the given dst.
+// The copied object's attributes are overwritten by attrs if non-nil.
+func (o *ObjectHandle) CopyTo(ctx context.Context, dst *ObjectHandle, attrs *ObjectAttrs) (*ObjectAttrs, error) {
+ // TODO(djd): move bucket/object name validation to a single helper func.
+ if o.bucket == "" || dst.bucket == "" {
+ return nil, errors.New("storage: the source and destination bucket names must both be non-empty")
+ }
+ if o.object == "" || dst.object == "" {
+ return nil, errors.New("storage: the source and destination object names must both be non-empty")
+ }
+ if !utf8.ValidString(o.object) {
+ return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+ }
+ if !utf8.ValidString(dst.object) {
+ return nil, fmt.Errorf("storage: dst name %q is not valid UTF-8", dst.object)
+ }
+ var rawObject *raw.Object
+ if attrs != nil {
+ attrs.Name = dst.object
+ if attrs.ContentType == "" {
+ return nil, errors.New("storage: attrs.ContentType must be non-empty")
+ }
+ rawObject = attrs.toRawObject(dst.bucket)
+ }
+ call := o.c.raw.Objects.Copy(o.bucket, o.object, dst.bucket, dst.object, rawObject).Projection("full").Context(ctx)
+ if err := applyConds("CopyTo destination", dst.conds, call); err != nil {
+ return nil, err
+ }
+ if err := applyConds("CopyTo source", toSourceConds(o.conds), call); err != nil {
+ return nil, err
+ }
+ obj, err := call.Do()
+ if err != nil {
+ return nil, err
+ }
+ return newObject(obj), nil
+}
+
+// NewReader creates a new Reader to read the contents of the
+// object.
+// ErrObjectNotExist will be returned if the object is not found.
+func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
+ return o.NewRangeReader(ctx, 0, -1)
+}
+
+// NewRangeReader reads part of an object, reading at most length bytes
+// starting at the given offset. If length is negative, the object is read
+// until the end.
+func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) {
+ if !utf8.ValidString(o.object) {
+ return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+ }
+ if offset < 0 {
+ return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
+ }
+ u := &url.URL{
+ Scheme: "https",
+ Host: "storage.googleapis.com",
+ Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
+ }
+ verb := "GET"
+ if length == 0 {
+ verb = "HEAD"
+ }
+ req, err := http.NewRequest(verb, u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ if err := applyConds("NewReader", o.conds, objectsGetCall{req}); err != nil {
+ return nil, err
+ }
+ if length < 0 && offset > 0 {
+ req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
+ } else if length > 0 {
+ req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
+ }
+ res, err := o.c.hc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ if res.StatusCode == http.StatusNotFound {
+ res.Body.Close()
+ return nil, ErrObjectNotExist
+ }
+ if res.StatusCode < 200 || res.StatusCode > 299 {
+ res.Body.Close()
+ return nil, fmt.Errorf("storage: can't read object %v/%v, status code: %v", o.bucket, o.object, res.Status)
+ }
+ if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
+ res.Body.Close()
+ return nil, errors.New("storage: partial request not satisfied")
+ }
+ clHeader := res.Header.Get("X-Goog-Stored-Content-Length")
+ cl, err := strconv.ParseInt(clHeader, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("storage: can't parse content length %q: %v", clHeader, err)
+ }
+ remain := res.ContentLength
+ body := res.Body
+ if length == 0 {
+ remain = 0
+ body.Close()
+ body = emptyBody
+ }
+ return &Reader{
+ body: body,
+ size: cl,
+ remain: remain,
+ contentType: res.Header.Get("Content-Type"),
+ }, nil
+}
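+
+// An illustrative sketch (obj is an *ObjectHandle): read only the first
+// 64 KiB of an object.
+//
+//	rc, err := obj.NewRangeReader(ctx, 0, 64<<10)
+//	if err != nil {
+//		// handle the error
+//	}
+//	defer rc.Close()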
+
+var emptyBody = ioutil.NopCloser(strings.NewReader(""))
+
+// NewWriter returns a storage Writer that writes to the GCS object
+// associated with this ObjectHandle.
+//
+// A new object will be created unless an object with this name already exists.
+// Otherwise any previous object with the same name will be replaced.
+// The object will not be available (and any previous object will remain)
+// until Close has been called.
+//
+// Attributes can be set on the object by modifying the returned Writer's
+// ObjectAttrs field before the first call to Write. If no ContentType
+// attribute is specified, the content type will be automatically sniffed
+// using net/http.DetectContentType.
+//
+// It is the caller's responsibility to call Close when writing is done.
+func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
+ return &Writer{
+ ctx: ctx,
+ o: o,
+ donec: make(chan struct{}),
+ ObjectAttrs: ObjectAttrs{Name: o.object},
+ }
+}
+
+// parseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func parseKey(key []byte) (*rsa.PrivateKey, error) {
+ if block, _ := pem.Decode(key); block != nil {
+ key = block.Bytes
+ }
+ parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+ if err != nil {
+ parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+ if err != nil {
+ return nil, err
+ }
+ }
+ parsed, ok := parsedKey.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("oauth2: private key is invalid")
+ }
+ return parsed, nil
+}
+
+// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
+type BucketAttrs struct {
+ // Name is the name of the bucket.
+ Name string
+
+ // ACL is the list of access control rules on the bucket.
+ ACL []ACLRule
+
+ // DefaultObjectACL is the list of access controls to
+ // apply to new objects when no object ACL is provided.
+ DefaultObjectACL []ACLRule
+
+ // Location is the location of the bucket. It defaults to "US".
+ Location string
+
+ // MetaGeneration is the metadata generation of the bucket.
+ MetaGeneration int64
+
+ // StorageClass is the storage class of the bucket. This defines
+ // how objects in the bucket are stored and determines the SLA
+ // and the cost of storage. Typical values are "STANDARD" and
+ // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD".
+ StorageClass string
+
+ // Created is the creation time of the bucket.
+ Created time.Time
+}
+
+func newBucket(b *raw.Bucket) *BucketAttrs {
+ if b == nil {
+ return nil
+ }
+ bucket := &BucketAttrs{
+ Name: b.Name,
+ Location: b.Location,
+ MetaGeneration: b.Metageneration,
+ StorageClass: b.StorageClass,
+ Created: convertTime(b.TimeCreated),
+ }
+ acl := make([]ACLRule, len(b.Acl))
+ for i, rule := range b.Acl {
+ acl[i] = ACLRule{
+ Entity: ACLEntity(rule.Entity),
+ Role: ACLRole(rule.Role),
+ }
+ }
+ bucket.ACL = acl
+ objACL := make([]ACLRule, len(b.DefaultObjectAcl))
+ for i, rule := range b.DefaultObjectAcl {
+ objACL[i] = ACLRule{
+ Entity: ACLEntity(rule.Entity),
+ Role: ACLRole(rule.Role),
+ }
+ }
+ bucket.DefaultObjectACL = objACL
+ return bucket
+}
+
+func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl {
+ var acl []*raw.ObjectAccessControl
+ if len(oldACL) > 0 {
+ acl = make([]*raw.ObjectAccessControl, len(oldACL))
+ for i, rule := range oldACL {
+ acl[i] = &raw.ObjectAccessControl{
+ Entity: string(rule.Entity),
+ Role: string(rule.Role),
+ }
+ }
+ }
+ return acl
+}
+
+// toRawBucket copies the editable attributes from b to the raw library's Bucket type.
+func (b *BucketAttrs) toRawBucket() *raw.Bucket {
+ var acl []*raw.BucketAccessControl
+ if len(b.ACL) > 0 {
+ acl = make([]*raw.BucketAccessControl, len(b.ACL))
+ for i, rule := range b.ACL {
+ acl[i] = &raw.BucketAccessControl{
+ Entity: string(rule.Entity),
+ Role: string(rule.Role),
+ }
+ }
+ }
+ dACL := toRawObjectACL(b.DefaultObjectACL)
+ return &raw.Bucket{
+ Name: b.Name,
+ DefaultObjectAcl: dACL,
+ Location: b.Location,
+ StorageClass: b.StorageClass,
+ Acl: acl,
+ }
+}
+
+// toRawObject copies the editable attributes from o to the raw library's Object type.
+func (o ObjectAttrs) toRawObject(bucket string) *raw.Object {
+ acl := toRawObjectACL(o.ACL)
+ return &raw.Object{
+ Bucket: bucket,
+ Name: o.Name,
+ ContentType: o.ContentType,
+ ContentEncoding: o.ContentEncoding,
+ ContentLanguage: o.ContentLanguage,
+ CacheControl: o.CacheControl,
+ ContentDisposition: o.ContentDisposition,
+ Acl: acl,
+ Metadata: o.Metadata,
+ }
+}
+
+// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object.
+type ObjectAttrs struct {
+ // Bucket is the name of the bucket containing this GCS object.
+ // This field is read-only.
+ Bucket string
+
+ // Name is the name of the object within the bucket.
+ // This field is read-only.
+ Name string
+
+ // ContentType is the MIME type of the object's content.
+ ContentType string
+
+ // ContentLanguage is the content language of the object's content.
+ ContentLanguage string
+
+ // CacheControl is the Cache-Control header to be sent in the response
+ // headers when serving the object data.
+ CacheControl string
+
+ // ACL is the list of access control rules for the object.
+ ACL []ACLRule
+
+ // Owner is the owner of the object. This field is read-only.
+ //
+ // If non-zero, it is in the form of "user-<userId>".
+ Owner string
+
+ // Size is the length of the object's content. This field is read-only.
+ Size int64
+
+ // ContentEncoding is the encoding of the object's content.
+ ContentEncoding string
+
+ // ContentDisposition is the optional Content-Disposition header of the object
+ // sent in the response headers.
+ ContentDisposition string
+
+ // MD5 is the MD5 hash of the object's content. This field is read-only.
+ MD5 []byte
+
+ // CRC32C is the CRC32 checksum of the object's content using
+ // the Castagnoli93 polynomial. This field is read-only.
+ CRC32C uint32
+
+	// MediaLink is a URL to the object's content. This field is read-only.
+ MediaLink string
+
+ // Metadata represents user-provided metadata, in key/value pairs.
+ // It can be nil if no metadata is provided.
+ Metadata map[string]string
+
+ // Generation is the generation number of the object's content.
+ // This field is read-only.
+ Generation int64
+
+ // MetaGeneration is the version of the metadata for this
+ // object at this generation. This field is used for preconditions
+ // and for detecting changes in metadata. A metageneration number
+ // is only meaningful in the context of a particular generation
+ // of a particular object. This field is read-only.
+ MetaGeneration int64
+
+ // StorageClass is the storage class of the bucket.
+ // This value defines how objects in the bucket are stored and
+ // determines the SLA and the cost of storage. Typical values are
+ // "STANDARD" and "DURABLE_REDUCED_AVAILABILITY".
+ // It defaults to "STANDARD". This field is read-only.
+ StorageClass string
+
+ // Created is the time the object was created. This field is read-only.
+ Created time.Time
+
+ // Deleted is the time the object was deleted.
+ // If not deleted, it is the zero value. This field is read-only.
+ Deleted time.Time
+
+ // Updated is the creation or modification time of the object.
+ // For buckets with versioning enabled, changing an object's
+ // metadata does not change this property. This field is read-only.
+ Updated time.Time
+}
+
+// convertTime converts a time in RFC3339 format to time.Time.
+// If any error occurs in parsing, the zero-value time.Time is silently returned.
+func convertTime(t string) time.Time {
+ var r time.Time
+ if t != "" {
+ r, _ = time.Parse(time.RFC3339, t)
+ }
+ return r
+}
+
+func newObject(o *raw.Object) *ObjectAttrs {
+ if o == nil {
+ return nil
+ }
+ acl := make([]ACLRule, len(o.Acl))
+ for i, rule := range o.Acl {
+ acl[i] = ACLRule{
+ Entity: ACLEntity(rule.Entity),
+ Role: ACLRole(rule.Role),
+ }
+ }
+ owner := ""
+ if o.Owner != nil {
+ owner = o.Owner.Entity
+ }
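+	// The API returns MD5 and CRC32C base64-encoded; the CRC32C bytes are
+	// big-endian, so the uint32 is reassembled from the four decoded bytes.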
+ md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)
+ var crc32c uint32
+ d, err := base64.StdEncoding.DecodeString(o.Crc32c)
+ if err == nil && len(d) == 4 {
+ crc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3])
+ }
+ return &ObjectAttrs{
+ Bucket: o.Bucket,
+ Name: o.Name,
+ ContentType: o.ContentType,
+ ContentLanguage: o.ContentLanguage,
+ CacheControl: o.CacheControl,
+ ACL: acl,
+ Owner: owner,
+ ContentEncoding: o.ContentEncoding,
+ Size: int64(o.Size),
+ MD5: md5,
+ CRC32C: crc32c,
+ MediaLink: o.MediaLink,
+ Metadata: o.Metadata,
+ Generation: o.Generation,
+ MetaGeneration: o.Metageneration,
+ StorageClass: o.StorageClass,
+ Created: convertTime(o.TimeCreated),
+ Deleted: convertTime(o.TimeDeleted),
+ Updated: convertTime(o.Updated),
+ }
+}
+
+// Query represents a query to filter objects from a bucket.
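+//
+// For example (sketch), a directory-style listing under "photos/" can be
+// requested with:
+//
+//	q := &Query{Prefix: "photos/", Delimiter: "/"}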
+type Query struct {
+	// Delimiter returns results in a directory-like fashion.
+	// Results will contain only objects whose names, aside from the
+	// prefix, do not contain the delimiter. Objects whose names,
+	// aside from the prefix, do contain the delimiter will have their
+	// names, truncated after the delimiter, returned in Prefixes.
+	// Duplicate prefixes are omitted.
+ // Optional.
+ Delimiter string
+
+ // Prefix is the prefix filter to query objects
+ // whose names begin with this prefix.
+ // Optional.
+ Prefix string
+
+ // Versions indicates whether multiple versions of the same
+ // object will be included in the results.
+ Versions bool
+
+ // Cursor is a previously-returned page token
+ // representing part of the larger set of results to view.
+ // Optional.
+ Cursor string
+
+ // MaxResults is the maximum number of items plus prefixes
+ // to return. As duplicate prefixes are omitted,
+ // fewer total results may be returned than requested.
+ // The default page limit is used if it is negative or zero.
+ MaxResults int
+}
+
+// ObjectList represents a list of objects returned from a bucket List call.
+type ObjectList struct {
+ // Results represent a list of object results.
+ Results []*ObjectAttrs
+
+ // Next is the continuation query to retrieve more
+ // results with the same filtering criteria. If there
+ // are no more results to retrieve, it is nil.
+ Next *Query
+
+ // Prefixes represents prefixes of objects
+ // matching-but-not-listed up to and including
+ // the requested delimiter.
+ Prefixes []string
+}
+
+// contentTyper implements ContentTyper to enable an
+// io.ReadCloser to specify its MIME type.
+type contentTyper struct {
+ io.Reader
+ t string
+}
+
+func (c *contentTyper) ContentType() string {
+ return c.t
+}
+
+// A Condition constrains methods to act on specific generations of
+// resources.
+//
+// Not all conditions or combinations of conditions are applicable to
+// all methods.
+type Condition interface {
+ // method is the high-level ObjectHandle method name, for
+ // error messages. call is the call object to modify.
+ modifyCall(method string, call interface{}) error
+}
+
+// applyConds modifies the provided call using the conditions in conds.
+// call is something that quacks like a *raw.WhateverCall.
+func applyConds(method string, conds []Condition, call interface{}) error {
+ for _, cond := range conds {
+ if err := cond.modifyCall(method, call); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// toSourceConds returns a slice of Conditions derived from conds that instead
+// function on the equivalent Source methods of a call.
+func toSourceConds(conds []Condition) []Condition {
+ out := make([]Condition, 0, len(conds))
+ for _, c := range conds {
+ switch c := c.(type) {
+ case genCond:
+ var m string
+ if strings.HasPrefix(c.method, "If") {
+ m = "IfSource" + c.method[2:]
+ } else {
+ m = "Source" + c.method
+ }
+ out = append(out, genCond{method: m, val: c.val})
+ default:
+ // NOTE(djd): If the message from unsupportedCond becomes
+ // confusing, we'll need to find a way for Conditions to
+ // identify themselves.
+ out = append(out, unsupportedCond{})
+ }
+ }
+ return out
+}
+
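+// Generation, IfGenerationMatch, IfGenerationNotMatch, IfMetaGenerationMatch
+// and IfMetaGenerationNotMatch return Conditions that constrain a call to a
+// particular generation or metageneration of the object. For example
+// (sketch; obj is assumed to be an *ObjectHandle with a WithConditions-style
+// wrapper available):
+//
+//	attrs, err := obj.WithConditions(IfGenerationMatch(gen)).Attrs(ctx)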
+func Generation(gen int64) Condition { return genCond{"Generation", gen} }
+func IfGenerationMatch(gen int64) Condition { return genCond{"IfGenerationMatch", gen} }
+func IfGenerationNotMatch(gen int64) Condition { return genCond{"IfGenerationNotMatch", gen} }
+func IfMetaGenerationMatch(gen int64) Condition { return genCond{"IfMetagenerationMatch", gen} }
+func IfMetaGenerationNotMatch(gen int64) Condition { return genCond{"IfMetagenerationNotMatch", gen} }
+
+type genCond struct {
+ method string
+ val int64
+}
+
+func (g genCond) modifyCall(srcMethod string, call interface{}) error {
+ rv := reflect.ValueOf(call)
+ meth := rv.MethodByName(g.method)
+ if !meth.IsValid() {
+ return fmt.Errorf("%s: condition %s not supported", srcMethod, g.method)
+ }
+ meth.Call([]reflect.Value{reflect.ValueOf(g.val)})
+ return nil
+}
+
+type unsupportedCond struct{}
+
+func (unsupportedCond) modifyCall(srcMethod string, call interface{}) error {
+ return fmt.Errorf("%s: condition not supported", srcMethod)
+}
+
+func appendParam(req *http.Request, k, v string) {
+ sep := ""
+ if req.URL.RawQuery != "" {
+ sep = "&"
+ }
+ req.URL.RawQuery += sep + url.QueryEscape(k) + "=" + url.QueryEscape(v)
+}
+
+// objectsGetCall wraps an *http.Request for an object fetch call, but adds the
+// methods that modifyCall searches for by name (the same names as in the raw,
+// auto-generated API).
+type objectsGetCall struct{ req *http.Request }
+
+func (c objectsGetCall) Generation(gen int64) {
+ appendParam(c.req, "generation", fmt.Sprint(gen))
+}
+func (c objectsGetCall) IfGenerationMatch(gen int64) {
+ appendParam(c.req, "ifGenerationMatch", fmt.Sprint(gen))
+}
+func (c objectsGetCall) IfGenerationNotMatch(gen int64) {
+ appendParam(c.req, "ifGenerationNotMatch", fmt.Sprint(gen))
+}
+func (c objectsGetCall) IfMetagenerationMatch(gen int64) {
+ appendParam(c.req, "ifMetagenerationMatch", fmt.Sprint(gen))
+}
+func (c objectsGetCall) IfMetagenerationNotMatch(gen int64) {
+ appendParam(c.req, "ifMetagenerationNotMatch", fmt.Sprint(gen))
+}
diff --git a/vendor/google.golang.org/cloud/storage/writer.go b/vendor/google.golang.org/cloud/storage/writer.go
new file mode 100644
index 0000000..60937c0
--- /dev/null
+++ b/vendor/google.golang.org/cloud/storage/writer.go
@@ -0,0 +1,129 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "fmt"
+ "io"
+ "unicode/utf8"
+
+ "golang.org/x/net/context"
+ "google.golang.org/api/googleapi"
+ raw "google.golang.org/api/storage/v1"
+)
+
+// A Writer writes a Cloud Storage object.
+type Writer struct {
+ // ObjectAttrs are optional attributes to set on the object. Any attributes
+ // must be initialized before the first Write call. Nil or zero-valued
+ // attributes are ignored.
+ ObjectAttrs
+
+ ctx context.Context
+ o *ObjectHandle
+
+ opened bool
+ pw *io.PipeWriter
+
+ donec chan struct{} // closed after err and obj are set.
+ err error
+ obj *ObjectAttrs
+}
+
+func (w *Writer) open() error {
+ attrs := w.ObjectAttrs
+ // Check the developer didn't change the object Name (this is unfortunate, but
+ // we don't want to store an object under the wrong name).
+ if attrs.Name != w.o.object {
+ return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
+ }
+ if !utf8.ValidString(attrs.Name) {
+ return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
+ }
+ pr, pw := io.Pipe()
+ w.pw = pw
+ w.opened = true
+
+ var mediaOpts []googleapi.MediaOption
+ if c := attrs.ContentType; c != "" {
+ mediaOpts = append(mediaOpts, googleapi.ContentType(c))
+ }
+
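+	// Run the upload in a separate goroutine reading from the pipe, so that
+	// Write calls can stream data into the request body. donec is closed
+	// once w.err and w.obj have been set.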
+ go func() {
+ defer close(w.donec)
+
+ call := w.o.c.raw.Objects.Insert(w.o.bucket, attrs.toRawObject(w.o.bucket)).
+ Media(pr, mediaOpts...).
+ Projection("full").
+ Context(w.ctx)
+
+ var resp *raw.Object
+ err := applyConds("NewWriter", w.o.conds, call)
+ if err == nil {
+ resp, err = call.Do()
+ }
+ if err != nil {
+ w.err = err
+ pr.CloseWithError(w.err)
+ return
+ }
+ w.obj = newObject(resp)
+ }()
+ return nil
+}
+
+// Write appends to w.
+func (w *Writer) Write(p []byte) (n int, err error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ if !w.opened {
+ if err := w.open(); err != nil {
+ return 0, err
+ }
+ }
+ return w.pw.Write(p)
+}
+
+// Close completes the write operation and flushes any buffered data.
+// If Close doesn't return an error, metadata about the written object
+// can be retrieved by calling Attrs.
+func (w *Writer) Close() error {
+ if !w.opened {
+ if err := w.open(); err != nil {
+ return err
+ }
+ }
+ if err := w.pw.Close(); err != nil {
+ return err
+ }
+ <-w.donec
+ return w.err
+}
+
+// CloseWithError aborts the write operation with the provided error.
+// CloseWithError always returns nil.
+func (w *Writer) CloseWithError(err error) error {
+ if !w.opened {
+ return nil
+ }
+ return w.pw.CloseWithError(err)
+}
+
+// Attrs returns metadata about a successfully-written object.
+// It's only valid to call it after Close returns nil.
+func (w *Writer) Attrs() *ObjectAttrs {
+ return w.obj
+}
diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md
new file mode 100644
index 0000000..36cd6f7
--- /dev/null
+++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md
@@ -0,0 +1,46 @@
+# How to contribute
+
+We definitely welcome patches and contributions to grpc! Here are some guidelines
+and information about how to do so.
+
+## Sending patches
+
+### Getting started
+
+1. Check out the code:
+
+ $ go get google.golang.org/grpc
+ $ cd $GOPATH/src/google.golang.org/grpc
+
+1. Create a fork of the grpc-go repository.
+1. Add your fork as a remote:
+
+ $ git remote add fork git@github.com:$YOURGITHUBUSERNAME/grpc-go.git
+
+1. Make changes, commit them.
+1. Run the test suite:
+
+ $ make test
+
+1. Push your changes to your fork:
+
+ $ git push fork ...
+
+1. Open a pull request.
+
+## Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+## Filing Issues
+When filing an issue, make sure to answer these five questions:
+
+1. What version of Go are you using (`go version`)?
+2. What operating system and processor architecture are you using?
+3. What did you do?
+4. What did you expect to see?
+5. What did you see instead?
+
+### Contributing code
+Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file.
diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE
new file mode 100644
index 0000000..f4988b4
--- /dev/null
+++ b/vendor/google.golang.org/grpc/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2014, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile
new file mode 100644
index 0000000..d26eb90
--- /dev/null
+++ b/vendor/google.golang.org/grpc/Makefile
@@ -0,0 +1,51 @@
+all: test testrace
+
+deps:
+ go get -d -v google.golang.org/grpc/...
+
+updatedeps:
+ go get -d -v -u -f google.golang.org/grpc/...
+
+testdeps:
+ go get -d -v -t google.golang.org/grpc/...
+
+updatetestdeps:
+ go get -d -v -t -u -f google.golang.org/grpc/...
+
+build: deps
+ go build google.golang.org/grpc/...
+
+proto:
+ @ if ! which protoc > /dev/null; then \
+ echo "error: protoc not installed" >&2; \
+ exit 1; \
+ fi
+ go get -u -v github.com/golang/protobuf/protoc-gen-go
+ for file in $$(git ls-files '*.proto'); do \
+ protoc -I $$(dirname $$file) --go_out=plugins=grpc:$$(dirname $$file) $$file; \
+ done
+
+test: testdeps
+ go test -v -cpu 1,4 google.golang.org/grpc/...
+
+testrace: testdeps
+ go test -v -race -cpu 1,4 google.golang.org/grpc/...
+
+clean:
+ go clean -i google.golang.org/grpc/...
+
+coverage: testdeps
+ ./coverage.sh --coveralls
+
+.PHONY: \
+ all \
+ deps \
+ updatedeps \
+ testdeps \
+ updatetestdeps \
+ build \
+ proto \
+ test \
+ testrace \
+ clean \
+ coverage
diff --git a/vendor/google.golang.org/grpc/PATENTS b/vendor/google.golang.org/grpc/PATENTS
new file mode 100644
index 0000000..69b4795
--- /dev/null
+++ b/vendor/google.golang.org/grpc/PATENTS
@@ -0,0 +1,22 @@
+Additional IP Rights Grant (Patents)
+
+"This implementation" means the copyrightable works distributed by
+Google as part of the gRPC project.
+
+Google hereby grants to You a perpetual, worldwide, non-exclusive,
+no-charge, royalty-free, irrevocable (except as stated in this section)
+patent license to make, have made, use, offer to sell, sell, import,
+transfer and otherwise run, modify and propagate the contents of this
+implementation of gRPC, where such license applies only to those patent
+claims, both currently owned or controlled by Google and acquired in
+the future, licensable by Google that are necessarily infringed by this
+implementation of gRPC. This grant does not include claims that would be
+infringed only as a consequence of further modification of this
+implementation. If you or your agent or exclusive licensee institute or
+order or agree to the institution of patent litigation against any
+entity (including a cross-claim or counterclaim in a lawsuit) alleging
+that this implementation of gRPC or any code incorporated within this
+implementation of gRPC constitutes direct or contributory patent
+infringement, or inducement of patent infringement, then any patent
+rights granted to you under this License for this implementation of gRPC
+shall terminate as of the date such litigation is filed.
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
new file mode 100644
index 0000000..90e9453
--- /dev/null
+++ b/vendor/google.golang.org/grpc/README.md
@@ -0,0 +1,32 @@
+# gRPC-Go
+
+[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc)
+
+The Go implementation of [gRPC](http://www.grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start](http://www.grpc.io/docs/) guide.
+
+Installation
+------------
+
+To install this package, you need to install Go and set up your Go workspace on your computer. The simplest way to install the library is to run:
+
+```
+$ go get google.golang.org/grpc
+```
+
+Prerequisites
+-------------
+
+This requires Go 1.5 or later.
+
+Constraints
+-----------
+The grpc package should depend only on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need to discuss them with the gRPC-Go authors and consultants first.
+
+Documentation
+-------------
+See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/).
+
+Status
+------
+Beta release
+
diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go
new file mode 100644
index 0000000..52f4f10
--- /dev/null
+++ b/vendor/google.golang.org/grpc/backoff.go
@@ -0,0 +1,80 @@
+package grpc
+
+import (
+ "math/rand"
+ "time"
+)
+
+// DefaultBackoffConfig uses values specified for backoff in
+// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
+var (
+ DefaultBackoffConfig = BackoffConfig{
+ MaxDelay: 120 * time.Second,
+ baseDelay: 1.0 * time.Second,
+ factor: 1.6,
+ jitter: 0.2,
+ }
+)
+
+// backoffStrategy defines the methodology for backing off after a grpc
+// connection failure.
+//
+// This is unexported until the gRPC project decides whether or not to allow
+// alternative backoff strategies. Once a decision is made, this type and its
+// method may be exported.
+type backoffStrategy interface {
+ // backoff returns the amount of time to wait before the next retry given
+ // the number of consecutive failures.
+ backoff(retries int) time.Duration
+}
+
+// BackoffConfig defines the parameters for the default gRPC backoff strategy.
+type BackoffConfig struct {
+ // MaxDelay is the upper bound of backoff delay.
+ MaxDelay time.Duration
+
+ // TODO(stevvooe): The following fields are not exported, as allowing
+ // changes would violate the current gRPC specification for backoff. If
+ // gRPC decides to allow more interesting backoff strategies, these fields
+ // may be opened up in the future.
+
+ // baseDelay is the amount of time to wait before retrying after the first
+ // failure.
+ baseDelay time.Duration
+
+ // factor is applied to the backoff after each retry.
+ factor float64
+
+ // jitter provides a range to randomize backoff delays.
+ jitter float64
+}
+
+func setDefaults(bc *BackoffConfig) {
+ md := bc.MaxDelay
+ *bc = DefaultBackoffConfig
+
+ if md > 0 {
+ bc.MaxDelay = md
+ }
+}
+
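+// backoff grows baseDelay by factor for each retry, caps the result at
+// MaxDelay, and then randomizes it by +/-jitter. With the default config
+// (baseDelay 1s, factor 1.6, jitter 0.2), retries=3 yields 1.6^3 ~= 4.1s
+// before jitter, i.e. a delay in roughly [3.3s, 4.9s].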
+func (bc BackoffConfig) backoff(retries int) (t time.Duration) {
+ if retries == 0 {
+ return bc.baseDelay
+ }
+ backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay)
+ for backoff < max && retries > 0 {
+ backoff *= bc.factor
+ retries--
+ }
+ if backoff > max {
+ backoff = max
+ }
+ // Randomize backoff delays so that if a cluster of requests start at
+ // the same time, they won't operate in lockstep.
+ backoff *= 1 + bc.jitter*(rand.Float64()*2-1)
+ if backoff < 0 {
+ return 0
+ }
+ return time.Duration(backoff)
+}
diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go
new file mode 100644
index 0000000..348bf97
--- /dev/null
+++ b/vendor/google.golang.org/grpc/balancer.go
@@ -0,0 +1,340 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "fmt"
+ "sync"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/naming"
+ "google.golang.org/grpc/transport"
+)
+
+// Address represents a server the client connects to.
+// This is the EXPERIMENTAL API and may be changed or extended in the future.
+type Address struct {
+ // Addr is the server address on which a connection will be established.
+ Addr string
+ // Metadata is the information associated with Addr, which may be used
+ // to make load balancing decision.
+ Metadata interface{}
+}
+
+// BalancerGetOptions configures a Get call.
+// This is the EXPERIMENTAL API and may be changed or extended in the future.
+type BalancerGetOptions struct {
+ // BlockingWait specifies whether Get should block when there is no
+ // connected address.
+ BlockingWait bool
+}
+
+// Balancer chooses network addresses for RPCs.
+// This is the EXPERIMENTAL API and may be changed or extended in the future.
+type Balancer interface {
+ // Start does the initialization work to bootstrap a Balancer. For example,
+ // this function may start the name resolution and watch the updates. It will
+ // be called when dialing.
+ Start(target string) error
+ // Up informs the Balancer that gRPC has a connection to the server at
+ // addr. It returns down which is called once the connection to addr gets
+ // lost or closed.
+	// TODO: It is not clear how to construct and take advantage of the meaningful
+	// error parameter for down. Need realistic demands to guide.
+ Up(addr Address) (down func(error))
+ // Get gets the address of a server for the RPC corresponding to ctx.
+ // i) If it returns a connected address, gRPC internals issues the RPC on the
+ // connection to this address;
+ // ii) If it returns an address on which the connection is under construction
+ // (initiated by Notify(...)) but not connected, gRPC internals
+ // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
+ // Shutdown state;
+ // or
+ // * issues RPC on the connection otherwise.
+ // iii) If it returns an address on which the connection does not exist, gRPC
+ // internals treats it as an error and will fail the corresponding RPC.
+ //
+ // Therefore, the following is the recommended rule when writing a custom Balancer.
+ // If opts.BlockingWait is true, it should return a connected address or
+ // block if there is no connected address. It should respect the timeout or
+ // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
+ // RPCs), it should return an address it has notified via Notify(...) immediately
+ // instead of blocking.
+ //
+ // The function returns put which is called once the rpc has completed or failed.
+ // put can collect and report RPC stats to a remote load balancer. gRPC internals
+ // will try to call this again if err is non-nil (unless err is ErrClientConnClosing).
+ //
+ // TODO: Add other non-recoverable errors?
+ Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
+	// Notify returns a channel that is used by gRPC internals to watch the addresses
+	// gRPC needs to connect to. The addresses might come from a name resolver or a
+	// remote load balancer. gRPC internals will compare them with the existing
+	// connected addresses. If an address the Balancer notified is not among the
+	// existing connected addresses, gRPC starts to connect to it. If an address in
+	// the existing connected addresses is not in the notification list, the
+	// corresponding connection is shut down gracefully. Otherwise, no action is
+	// taken. Note that the Address slice must be the full list of Addresses which
+	// should be connected. It is NOT a delta.
+ Notify() <-chan []Address
+ // Close shuts down the balancer.
+ Close() error
+}
+
+// downErr implements net.Error. It is constructed by gRPC internals and passed to the down
+// call of Balancer.
+type downErr struct {
+ timeout bool
+ temporary bool
+ desc string
+}
+
+func (e downErr) Error() string { return e.desc }
+func (e downErr) Timeout() bool { return e.timeout }
+func (e downErr) Temporary() bool { return e.temporary }
+
+func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr {
+ return downErr{
+ timeout: timeout,
+ temporary: temporary,
+ desc: fmt.Sprintf(format, a...),
+ }
+}
+
+// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
+// the name resolution updates and updates the addresses available correspondingly.
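+//
+// For example (sketch; r is assumed to be a naming.Resolver implementation,
+// such as one backed by DNS or etcd):
+//
+//	conn, err := grpc.Dial(target, grpc.WithBalancer(grpc.RoundRobin(r)))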
+func RoundRobin(r naming.Resolver) Balancer {
+ return &roundRobin{r: r}
+}
+
+type roundRobin struct {
+ r naming.Resolver
+ w naming.Watcher
+	open []Address // all the addresses the client should potentially connect to
+ mu sync.Mutex
+ addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
+ connected []Address // all the connected addresses
+ next int // index of the next address to return for Get()
+ waitCh chan struct{} // the channel to block when there is no connected address available
+ done bool // The Balancer is closed.
+}
+
+func (rr *roundRobin) watchAddrUpdates() error {
+ updates, err := rr.w.Next()
+ if err != nil {
+		grpclog.Printf("grpc: the naming watcher stops working due to %v.", err)
+ return err
+ }
+ rr.mu.Lock()
+ defer rr.mu.Unlock()
+ for _, update := range updates {
+ addr := Address{
+ Addr: update.Addr,
+ }
+ switch update.Op {
+ case naming.Add:
+ var exist bool
+ for _, v := range rr.open {
+ if addr == v {
+ exist = true
+ grpclog.Println("grpc: The name resolver wanted to add an existing address: ", addr)
+ break
+ }
+ }
+ if exist {
+ continue
+ }
+ rr.open = append(rr.open, addr)
+ case naming.Delete:
+ for i, v := range rr.open {
+ if v == addr {
+ copy(rr.open[i:], rr.open[i+1:])
+ rr.open = rr.open[:len(rr.open)-1]
+ break
+ }
+ }
+ default:
+ grpclog.Println("Unknown update.Op ", update.Op)
+ }
+ }
+ // Make a copy of rr.open and write it onto rr.addrCh so that gRPC internals gets notified.
+	open := make([]Address, len(rr.open))
+ copy(open, rr.open)
+ if rr.done {
+ return ErrClientConnClosing
+ }
+ rr.addrCh <- open
+ return nil
+}
+
+func (rr *roundRobin) Start(target string) error {
+ if rr.r == nil {
+		// If there is no name resolver installed, there is no need to do
+		// name resolution. In this case, rr.addrCh stays nil.
+ return nil
+ }
+ w, err := rr.r.Resolve(target)
+ if err != nil {
+ return err
+ }
+ rr.w = w
+ rr.addrCh = make(chan []Address)
+ go func() {
+ for {
+ if err := rr.watchAddrUpdates(); err != nil {
+ return
+ }
+ }
+ }()
+ return nil
+}
+
+// Up appends addr to the end of rr.connected and sends notification if there
+// are pending Get() calls.
+func (rr *roundRobin) Up(addr Address) func(error) {
+ rr.mu.Lock()
+ defer rr.mu.Unlock()
+ for _, a := range rr.connected {
+ if a == addr {
+ return nil
+ }
+ }
+ rr.connected = append(rr.connected, addr)
+ if len(rr.connected) == 1 {
+		// addr is the only one available. Notify the Get() callers who are blocking.
+ if rr.waitCh != nil {
+ close(rr.waitCh)
+ rr.waitCh = nil
+ }
+ }
+ return func(err error) {
+ rr.down(addr, err)
+ }
+}
+
+// down removes addr from rr.connected and moves the remaining addrs forward.
+func (rr *roundRobin) down(addr Address, err error) {
+ rr.mu.Lock()
+ defer rr.mu.Unlock()
+ for i, a := range rr.connected {
+ if a == addr {
+ copy(rr.connected[i:], rr.connected[i+1:])
+ rr.connected = rr.connected[:len(rr.connected)-1]
+ return
+ }
+ }
+}
+
+// Get returns the next addr in the rotation.
+func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
+ var ch chan struct{}
+ rr.mu.Lock()
+ if rr.done {
+ rr.mu.Unlock()
+ err = ErrClientConnClosing
+ return
+ }
+ if rr.next >= len(rr.connected) {
+ rr.next = 0
+ }
+ if len(rr.connected) > 0 {
+ addr = rr.connected[rr.next]
+ rr.next++
+ rr.mu.Unlock()
+ return
+ }
+ // There is no address available. Wait on rr.waitCh.
+ // TODO(zhaoq): Handle the case when opts.BlockingWait is false.
+ if rr.waitCh == nil {
+ ch = make(chan struct{})
+ rr.waitCh = ch
+ } else {
+ ch = rr.waitCh
+ }
+ rr.mu.Unlock()
+ for {
+ select {
+ case <-ctx.Done():
+ err = transport.ContextErr(ctx.Err())
+ return
+ case <-ch:
+ rr.mu.Lock()
+ if rr.done {
+ rr.mu.Unlock()
+ err = ErrClientConnClosing
+ return
+ }
+ if len(rr.connected) == 0 {
+ // The newly added addr got removed by Down() again.
+ if rr.waitCh == nil {
+ ch = make(chan struct{})
+ rr.waitCh = ch
+ } else {
+ ch = rr.waitCh
+ }
+ rr.mu.Unlock()
+ continue
+ }
+ if rr.next >= len(rr.connected) {
+ rr.next = 0
+ }
+ addr = rr.connected[rr.next]
+ rr.next++
+ rr.mu.Unlock()
+ return
+ }
+ }
+}
+
+func (rr *roundRobin) Notify() <-chan []Address {
+ return rr.addrCh
+}
+
+func (rr *roundRobin) Close() error {
+ rr.mu.Lock()
+ defer rr.mu.Unlock()
+ rr.done = true
+ if rr.w != nil {
+ rr.w.Close()
+ }
+ if rr.waitCh != nil {
+ close(rr.waitCh)
+ rr.waitCh = nil
+ }
+ if rr.addrCh != nil {
+ close(rr.addrCh)
+ }
+ return nil
+}
diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go
new file mode 100644
index 0000000..d6d993b
--- /dev/null
+++ b/vendor/google.golang.org/grpc/call.go
@@ -0,0 +1,215 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "bytes"
+ "io"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/transport"
+)
+
+// recvResponse receives and parses an RPC response.
+// On error, it returns the error and indicates whether the call should be retried.
+//
+// TODO(zhaoq): Check whether the received message sequence is valid.
+func recvResponse(dopts dialOptions, t transport.ClientTransport, c *callInfo, stream *transport.Stream, reply interface{}) error {
+ // Try to acquire header metadata from the server if there is any.
+ var err error
+ c.headerMD, err = stream.Header()
+ if err != nil {
+ return err
+ }
+ p := &parser{r: stream}
+ for {
+ if err = recv(p, dopts.codec, stream, dopts.dc, reply); err != nil {
+ if err == io.EOF {
+ break
+ }
+ return err
+ }
+ }
+ c.trailerMD = stream.Trailer()
+ return nil
+}
+
+// sendRequest writes out the various parts of an RPC, such as the Context and the Message.
+func sendRequest(ctx context.Context, codec Codec, compressor Compressor, callHdr *transport.CallHdr, t transport.ClientTransport, args interface{}, opts *transport.Options) (_ *transport.Stream, err error) {
+ stream, err := t.NewStream(ctx, callHdr)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ if _, ok := err.(transport.ConnectionError); !ok {
+ t.CloseStream(stream, err)
+ }
+ }
+ }()
+ var cbuf *bytes.Buffer
+ if compressor != nil {
+ cbuf = new(bytes.Buffer)
+ }
+ outBuf, err := encode(codec, args, compressor, cbuf)
+ if err != nil {
+ return nil, transport.StreamErrorf(codes.Internal, "grpc: %v", err)
+ }
+ err = t.Write(stream, outBuf, opts)
+ if err != nil {
+ return nil, err
+ }
+ // Sent successfully.
+ return stream, nil
+}
+
+// Invoke sends the RPC request on the wire and returns after the response is
+// received. Invoke is called by generated code. Users can also call Invoke
+// directly when their use case really requires it.
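+//
+// For example (sketch; the method path and message types are hypothetical
+// generated code):
+//
+//	var reply pb.HelloReply
+//	err := grpc.Invoke(ctx, "/helloworld.Greeter/SayHello",
+//		&pb.HelloRequest{Name: "world"}, &reply, conn)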
+func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) (err error) {
+ var c callInfo
+ for _, o := range opts {
+ if err := o.before(&c); err != nil {
+ return toRPCErr(err)
+ }
+ }
+ defer func() {
+ for _, o := range opts {
+ o.after(&c)
+ }
+ }()
+ if EnableTracing {
+ c.traceInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
+ defer c.traceInfo.tr.Finish()
+ c.traceInfo.firstLine.client = true
+ if deadline, ok := ctx.Deadline(); ok {
+ c.traceInfo.firstLine.deadline = deadline.Sub(time.Now())
+ }
+ c.traceInfo.tr.LazyLog(&c.traceInfo.firstLine, false)
+ // TODO(dsymonds): Arrange for c.traceInfo.firstLine.remoteAddr to be set.
+ defer func() {
+ if err != nil {
+ c.traceInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ c.traceInfo.tr.SetError()
+ }
+ }()
+ }
+ topts := &transport.Options{
+ Last: true,
+ Delay: false,
+ }
+ for {
+ var (
+ err error
+ t transport.ClientTransport
+ stream *transport.Stream
+ // Record the put handler from Balancer.Get(...). It is called once the
+ // RPC has completed or failed.
+ put func()
+ )
+ // TODO(zhaoq): Need a formal spec of fail-fast.
+ callHdr := &transport.CallHdr{
+ Host: cc.authority,
+ Method: method,
+ }
+ if cc.dopts.cp != nil {
+ callHdr.SendCompress = cc.dopts.cp.Type()
+ }
+ gopts := BalancerGetOptions{
+ BlockingWait: !c.failFast,
+ }
+ t, put, err = cc.getTransport(ctx, gopts)
+ if err != nil {
+ // TODO(zhaoq): Probably revisit the error handling.
+ if err == ErrClientConnClosing {
+ return Errorf(codes.FailedPrecondition, "%v", err)
+ }
+ if _, ok := err.(transport.StreamError); ok {
+ return toRPCErr(err)
+ }
+ if _, ok := err.(transport.ConnectionError); ok {
+ if c.failFast {
+ return toRPCErr(err)
+ }
+ }
+ // All the remaining cases are treated as retryable.
+ continue
+ }
+ if c.traceInfo.tr != nil {
+ c.traceInfo.tr.LazyLog(&payload{sent: true, msg: args}, true)
+ }
+ stream, err = sendRequest(ctx, cc.dopts.codec, cc.dopts.cp, callHdr, t, args, topts)
+ if err != nil {
+ if put != nil {
+ put()
+ put = nil
+ }
+ if _, ok := err.(transport.ConnectionError); ok {
+ if c.failFast {
+ return toRPCErr(err)
+ }
+ continue
+ }
+ return toRPCErr(err)
+ }
+ // Receive the response
+ err = recvResponse(cc.dopts, t, &c, stream, reply)
+ if err != nil {
+ if put != nil {
+ put()
+ put = nil
+ }
+ if _, ok := err.(transport.ConnectionError); ok {
+ if c.failFast {
+ return toRPCErr(err)
+ }
+ continue
+ }
+ t.CloseStream(stream, err)
+ return toRPCErr(err)
+ }
+ if c.traceInfo.tr != nil {
+ c.traceInfo.tr.LazyLog(&payload{sent: false, msg: reply}, true)
+ }
+ t.CloseStream(stream, nil)
+ if put != nil {
+ put()
+ put = nil
+ }
+ return Errorf(stream.StatusCode(), "%s", stream.StatusDesc())
+ }
+}
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
new file mode 100644
index 0000000..53a1212
--- /dev/null
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -0,0 +1,724 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/transport"
+)
+
+var (
+ // ErrClientConnClosing indicates that the operation is illegal because
+ // the ClientConn is closing.
+ ErrClientConnClosing = errors.New("grpc: the client connection is closing")
+
+ // errNoTransportSecurity indicates that there is no transport security
+ // being set for ClientConn. Users should either set one or explicitly
+ // call WithInsecure DialOption to disable security.
+ errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
+ // errCredentialsMisuse indicates that users want to transmit security information
+ // (e.g., oauth2 token) which requires secure connection on an insecure
+ // connection.
+ errCredentialsMisuse = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportAuthenticator() to set)")
+ // errClientConnTimeout indicates that the connection could not be
+ // established or re-established within the specified timeout.
+ errClientConnTimeout = errors.New("grpc: timed out trying to connect")
+	// errNetworkIO indicates that the connection is down due to some network I/O error.
+ errNetworkIO = errors.New("grpc: failed with network I/O error")
+ // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs.
+ errConnDrain = errors.New("grpc: the connection is drained")
+ // errConnClosing indicates that the connection is closing.
+ errConnClosing = errors.New("grpc: the connection is closing")
+ // minimum time to give a connection to complete
+ minConnectTimeout = 20 * time.Second
+)
+
+// dialOptions configure a Dial call. dialOptions are set by the DialOption
+// values passed to Dial.
+type dialOptions struct {
+ codec Codec
+ cp Compressor
+ dc Decompressor
+ bs backoffStrategy
+ balancer Balancer
+ block bool
+ insecure bool
+ copts transport.ConnectOptions
+}
+
+// DialOption configures how we set up the connection.
+type DialOption func(*dialOptions)
+
+// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling.
+func WithCodec(c Codec) DialOption {
+ return func(o *dialOptions) {
+ o.codec = c
+ }
+}
+
+// WithCompressor returns a DialOption which sets a Compressor for message
+// compression.
+func WithCompressor(cp Compressor) DialOption {
+ return func(o *dialOptions) {
+ o.cp = cp
+ }
+}
+
+// WithDecompressor returns a DialOption which sets a Decompressor for message
+// decompression.
+func WithDecompressor(dc Decompressor) DialOption {
+ return func(o *dialOptions) {
+ o.dc = dc
+ }
+}
+
+// WithBalancer returns a DialOption which sets a load balancer.
+func WithBalancer(b Balancer) DialOption {
+ return func(o *dialOptions) {
+ o.balancer = b
+ }
+}
+
+// WithBackoffMaxDelay configures the dialer to use the provided maximum delay
+// when backing off after failed connection attempts.
+func WithBackoffMaxDelay(md time.Duration) DialOption {
+ return WithBackoffConfig(BackoffConfig{MaxDelay: md})
+}
+
+// WithBackoffConfig configures the dialer to use the provided backoff
+// parameters after connection failures.
+//
+// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up
+// for use.
+func WithBackoffConfig(b BackoffConfig) DialOption {
+ // Set defaults to ensure that provided BackoffConfig is valid and
+ // unexported fields get default values.
+ setDefaults(&b)
+ return withBackoff(b)
+}
+
+// withBackoff sets the backoff strategy used for retries after a
+// failed connection attempt.
+//
+// This can be exported if arbitrary backoff strategies are allowed by gRPC.
+func withBackoff(bs backoffStrategy) DialOption {
+ return func(o *dialOptions) {
+ o.bs = bs
+ }
+}
+
+// WithBlock returns a DialOption which makes the caller of Dial block until the
+// underlying connection is up. Without this, Dial returns immediately and
+// connecting to the server happens in the background.
+func WithBlock() DialOption {
+ return func(o *dialOptions) {
+ o.block = true
+ }
+}
+
+// WithInsecure returns a DialOption which disables transport security for this ClientConn.
+// Note that transport security is required unless WithInsecure is set.
+func WithInsecure() DialOption {
+ return func(o *dialOptions) {
+ o.insecure = true
+ }
+}
+
+// WithTransportCredentials returns a DialOption which configures
+// connection-level security credentials (e.g., TLS/SSL).
+func WithTransportCredentials(creds credentials.TransportAuthenticator) DialOption {
+ return func(o *dialOptions) {
+ o.copts.AuthOptions = append(o.copts.AuthOptions, creds)
+ }
+}
+
+// WithPerRPCCredentials returns a DialOption which sets
+// credentials which will place auth state on each outbound RPC.
+func WithPerRPCCredentials(creds credentials.Credentials) DialOption {
+ return func(o *dialOptions) {
+ o.copts.AuthOptions = append(o.copts.AuthOptions, creds)
+ }
+}
+
+// WithTimeout returns a DialOption that configures a timeout for dialing a client connection.
+func WithTimeout(d time.Duration) DialOption {
+ return func(o *dialOptions) {
+ o.copts.Timeout = d
+ }
+}
+
+// WithDialer returns a DialOption that specifies a function to use for dialing network addresses.
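+//
+// For example (sketch), to dial Unix domain sockets instead of TCP:
+//
+//	grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+//		return net.DialTimeout("unix", addr, timeout)
+//	})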
+func WithDialer(f func(addr string, timeout time.Duration) (net.Conn, error)) DialOption {
+ return func(o *dialOptions) {
+ o.copts.Dialer = f
+ }
+}
+
+// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs.
+func WithUserAgent(s string) DialOption {
+ return func(o *dialOptions) {
+ o.copts.UserAgent = s
+ }
+}
+
+// Dial creates a client connection to the given target.
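+//
+// A minimal sketch (the target and options are illustrative):
+//
+//	conn, err := grpc.Dial("localhost:50051",
+//		grpc.WithInsecure(),
+//		grpc.WithTimeout(5*time.Second),
+//	)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer conn.Close()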
+func Dial(target string, opts ...DialOption) (*ClientConn, error) {
+ cc := &ClientConn{
+ target: target,
+ conns: make(map[Address]*addrConn),
+ }
+ for _, opt := range opts {
+ opt(&cc.dopts)
+ }
+ if cc.dopts.codec == nil {
+ // Set the default codec.
+ cc.dopts.codec = protoCodec{}
+ }
+
+ if cc.dopts.bs == nil {
+ cc.dopts.bs = DefaultBackoffConfig
+ }
+
+ cc.balancer = cc.dopts.balancer
+ if cc.balancer == nil {
+ cc.balancer = RoundRobin(nil)
+ }
+ if err := cc.balancer.Start(target); err != nil {
+ return nil, err
+ }
+ ch := cc.balancer.Notify()
+ if ch == nil {
+ // There is no name resolver installed.
+ addr := Address{Addr: target}
+ if err := cc.newAddrConn(addr, false); err != nil {
+ return nil, err
+ }
+ } else {
+ addrs, ok := <-ch
+ if !ok || len(addrs) == 0 {
+ return nil, fmt.Errorf("grpc: there is no address available to dial")
+ }
+ for _, a := range addrs {
+ if err := cc.newAddrConn(a, false); err != nil {
+ return nil, err
+ }
+ }
+ go cc.lbWatcher()
+ }
+
+ colonPos := strings.LastIndex(target, ":")
+ if colonPos == -1 {
+ colonPos = len(target)
+ }
+ cc.authority = target[:colonPos]
+ return cc, nil
+}
+
+// ConnectivityState indicates the state of a client connection.
+type ConnectivityState int
+
+const (
+ // Idle indicates the ClientConn is idle.
+ Idle ConnectivityState = iota
+	// Connecting indicates the ClientConn is connecting.
+ Connecting
+ // Ready indicates the ClientConn is ready for work.
+ Ready
+ // TransientFailure indicates the ClientConn has seen a failure but expects to recover.
+ TransientFailure
+ // Shutdown indicates the ClientConn has started shutting down.
+ Shutdown
+)
+
+func (s ConnectivityState) String() string {
+ switch s {
+ case Idle:
+ return "IDLE"
+ case Connecting:
+ return "CONNECTING"
+ case Ready:
+ return "READY"
+ case TransientFailure:
+ return "TRANSIENT_FAILURE"
+ case Shutdown:
+ return "SHUTDOWN"
+ default:
+ panic(fmt.Sprintf("unknown connectivity state: %d", s))
+ }
+}
+
+// ClientConn represents a client connection to an RPC server.
+type ClientConn struct {
+ target string
+ balancer Balancer
+ authority string
+ dopts dialOptions
+
+ mu sync.RWMutex
+ conns map[Address]*addrConn
+}
+
+func (cc *ClientConn) lbWatcher() {
+ for addrs := range cc.balancer.Notify() {
+ var (
+			add []Address   // Addresses that need new connections set up.
+			del []*addrConn // Connections that need to be torn down.
+ )
+ cc.mu.Lock()
+ for _, a := range addrs {
+ if _, ok := cc.conns[a]; !ok {
+ add = append(add, a)
+ }
+ }
+ for k, c := range cc.conns {
+ var keep bool
+ for _, a := range addrs {
+ if k == a {
+ keep = true
+ break
+ }
+ }
+ if !keep {
+ del = append(del, c)
+ }
+ }
+ cc.mu.Unlock()
+ for _, a := range add {
+ cc.newAddrConn(a, true)
+ }
+ for _, c := range del {
+ c.tearDown(errConnDrain)
+ }
+ }
+}
+
+func (cc *ClientConn) newAddrConn(addr Address, skipWait bool) error {
+ ac := &addrConn{
+ cc: cc,
+ addr: addr,
+ dopts: cc.dopts,
+ shutdownChan: make(chan struct{}),
+ }
+ if EnableTracing {
+ ac.events = trace.NewEventLog("grpc.ClientConn", ac.addr.Addr)
+ }
+ if !ac.dopts.insecure {
+ var ok bool
+ for _, cd := range ac.dopts.copts.AuthOptions {
+ if _, ok = cd.(credentials.TransportAuthenticator); ok {
+ break
+ }
+ }
+ if !ok {
+ return errNoTransportSecurity
+ }
+ } else {
+ for _, cd := range ac.dopts.copts.AuthOptions {
+ if cd.RequireTransportSecurity() {
+ return errCredentialsMisuse
+ }
+ }
+ }
+ // Insert ac into ac.cc.conns. This needs to be done before any getTransport(...) is called.
+ ac.cc.mu.Lock()
+ if ac.cc.conns == nil {
+ ac.cc.mu.Unlock()
+ return ErrClientConnClosing
+ }
+ stale := ac.cc.conns[ac.addr]
+ ac.cc.conns[ac.addr] = ac
+ ac.cc.mu.Unlock()
+ if stale != nil {
+		// There is an addrConn alive on ac.addr already. This could be because
+		// i) stale's Close is still in progress;
+		// ii) a buggy Balancer notified duplicate Addresses.
+ stale.tearDown(errConnDrain)
+ }
+ ac.stateCV = sync.NewCond(&ac.mu)
+ // skipWait may overwrite the decision in ac.dopts.block.
+ if ac.dopts.block && !skipWait {
+ if err := ac.resetTransport(false); err != nil {
+ ac.tearDown(err)
+ return err
+ }
+ // Start to monitor the error status of transport.
+ go ac.transportMonitor()
+ } else {
+ // Start a goroutine connecting to the server asynchronously.
+ go func() {
+ if err := ac.resetTransport(false); err != nil {
+ grpclog.Printf("Failed to dial %s: %v; please retry.", ac.addr.Addr, err)
+ ac.tearDown(err)
+ return
+ }
+ ac.transportMonitor()
+ }()
+ }
+ return nil
+}
+
+func (cc *ClientConn) getTransport(ctx context.Context, opts BalancerGetOptions) (transport.ClientTransport, func(), error) {
+ // TODO(zhaoq): Implement fail-fast logic.
+ addr, put, err := cc.balancer.Get(ctx, opts)
+ if err != nil {
+ return nil, nil, err
+ }
+ cc.mu.RLock()
+ if cc.conns == nil {
+ cc.mu.RUnlock()
+ return nil, nil, ErrClientConnClosing
+ }
+ ac, ok := cc.conns[addr]
+ cc.mu.RUnlock()
+ if !ok {
+ if put != nil {
+ put()
+ }
+ return nil, nil, transport.StreamErrorf(codes.Internal, "grpc: failed to find the transport to send the rpc")
+ }
+ t, err := ac.wait(ctx)
+ if err != nil {
+ if put != nil {
+ put()
+ }
+ return nil, nil, err
+ }
+ return t, put, nil
+}
+
+// Close tears down the ClientConn and all underlying connections.
+func (cc *ClientConn) Close() error {
+ cc.mu.Lock()
+ if cc.conns == nil {
+ cc.mu.Unlock()
+ return ErrClientConnClosing
+ }
+ conns := cc.conns
+ cc.conns = nil
+ cc.mu.Unlock()
+ cc.balancer.Close()
+ for _, ac := range conns {
+ ac.tearDown(ErrClientConnClosing)
+ }
+ return nil
+}
+
+// addrConn is a network connection to a given address.
+type addrConn struct {
+ cc *ClientConn
+ addr Address
+ dopts dialOptions
+ shutdownChan chan struct{}
+ events trace.EventLog
+
+ mu sync.Mutex
+ state ConnectivityState
+ stateCV *sync.Cond
+ down func(error) // the handler called when a connection is down.
+ // ready is closed and becomes nil when a new transport is up or failed
+ // due to timeout.
+ ready chan struct{}
+ transport transport.ClientTransport
+}
+
+// printf records an event in ac's event log, unless ac has been closed.
+// REQUIRES ac.mu is held.
+func (ac *addrConn) printf(format string, a ...interface{}) {
+ if ac.events != nil {
+ ac.events.Printf(format, a...)
+ }
+}
+
+// errorf records an error in ac's event log, unless ac has been closed.
+// REQUIRES ac.mu is held.
+func (ac *addrConn) errorf(format string, a ...interface{}) {
+ if ac.events != nil {
+ ac.events.Errorf(format, a...)
+ }
+}
+
+// getState returns the connectivity state of the Conn
+func (ac *addrConn) getState() ConnectivityState {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ return ac.state
+}
+
+// waitForStateChange blocks until the state changes to something other than the sourceState.
+func (ac *addrConn) waitForStateChange(ctx context.Context, sourceState ConnectivityState) (ConnectivityState, error) {
+ ac.mu.Lock()
+ defer ac.mu.Unlock()
+ if sourceState != ac.state {
+ return ac.state, nil
+ }
+ done := make(chan struct{})
+ var err error
+ go func() {
+ select {
+ case <-ctx.Done():
+ ac.mu.Lock()
+ err = ctx.Err()
+ ac.stateCV.Broadcast()
+ ac.mu.Unlock()
+ case <-done:
+ }
+ }()
+ defer close(done)
+ for sourceState == ac.state {
+ ac.stateCV.Wait()
+ if err != nil {
+ return ac.state, err
+ }
+ }
+ return ac.state, nil
+}
+
+func (ac *addrConn) resetTransport(closeTransport bool) error {
+ var retries int
+ start := time.Now()
+ for {
+ ac.mu.Lock()
+ ac.printf("connecting")
+ if ac.state == Shutdown {
+ // ac.tearDown(...) has been invoked.
+ ac.mu.Unlock()
+ return errConnClosing
+ }
+ if ac.down != nil {
+ ac.down(downErrorf(false, true, "%v", errNetworkIO))
+ ac.down = nil
+ }
+ ac.state = Connecting
+ ac.stateCV.Broadcast()
+ t := ac.transport
+ ac.mu.Unlock()
+ if closeTransport && t != nil {
+ t.Close()
+ }
+ // Adjust timeout for the current try.
+ copts := ac.dopts.copts
+ if copts.Timeout < 0 {
+ ac.tearDown(errClientConnTimeout)
+ return errClientConnTimeout
+ }
+ if copts.Timeout > 0 {
+ copts.Timeout -= time.Since(start)
+ if copts.Timeout <= 0 {
+ ac.tearDown(errClientConnTimeout)
+ return errClientConnTimeout
+ }
+ }
+ sleepTime := ac.dopts.bs.backoff(retries)
+ timeout := sleepTime
+ if timeout < minConnectTimeout {
+ timeout = minConnectTimeout
+ }
+ if copts.Timeout == 0 || copts.Timeout > timeout {
+ copts.Timeout = timeout
+ }
+ connectTime := time.Now()
+ newTransport, err := transport.NewClientTransport(ac.addr.Addr, &copts)
+ if err != nil {
+ ac.mu.Lock()
+ if ac.state == Shutdown {
+ // ac.tearDown(...) has been invoked.
+ ac.mu.Unlock()
+ return errConnClosing
+ }
+ ac.errorf("transient failure: %v", err)
+ ac.state = TransientFailure
+ ac.stateCV.Broadcast()
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
+ }
+ ac.mu.Unlock()
+ sleepTime -= time.Since(connectTime)
+ if sleepTime < 0 {
+ sleepTime = 0
+ }
+ // Fail early before falling into sleep.
+ if ac.dopts.copts.Timeout > 0 && ac.dopts.copts.Timeout < sleepTime+time.Since(start) {
+ ac.mu.Lock()
+ ac.errorf("connection timeout")
+ ac.mu.Unlock()
+ ac.tearDown(errClientConnTimeout)
+ return errClientConnTimeout
+ }
+ closeTransport = false
+ select {
+ case <-time.After(sleepTime):
+ case <-ac.shutdownChan:
+ }
+ retries++
+ grpclog.Printf("grpc: addrConn.resetTransport failed to create client transport: %v; Reconnecting to %q", err, ac.addr)
+ continue
+ }
+ ac.mu.Lock()
+ ac.printf("ready")
+ if ac.state == Shutdown {
+ // ac.tearDown(...) has been invoked.
+ ac.mu.Unlock()
+ newTransport.Close()
+ return errConnClosing
+ }
+ ac.state = Ready
+ ac.stateCV.Broadcast()
+ ac.transport = newTransport
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
+ }
+ ac.down = ac.cc.balancer.Up(ac.addr)
+ ac.mu.Unlock()
+ return nil
+ }
+}
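+
+// Note on the per-attempt timeout computed above: each attempt uses
+// max(backoff(retries), minConnectTimeout), further capped by whatever
+// remains of the overall dial timeout (copts.Timeout), so a short backoff
+// never starves the connection attempt itself.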
+
+// transportMonitor runs in a goroutine to track errors on the transport. It
+// creates a new transport if an error occurs, and returns when the addrConn
+// is closing.
+func (ac *addrConn) transportMonitor() {
+ for {
+ ac.mu.Lock()
+ t := ac.transport
+ ac.mu.Unlock()
+ select {
+ // shutdownChan is needed to detect the teardown when
+ // the addrConn is idle (i.e., no RPC in flight).
+ case <-ac.shutdownChan:
+ return
+ case <-t.Error():
+ ac.mu.Lock()
+ if ac.state == Shutdown {
+ // ac.tearDown(...) has been invoked.
+ ac.mu.Unlock()
+ return
+ }
+ ac.state = TransientFailure
+ ac.stateCV.Broadcast()
+ ac.mu.Unlock()
+ if err := ac.resetTransport(true); err != nil {
+ ac.mu.Lock()
+ ac.printf("transport exiting: %v", err)
+ ac.mu.Unlock()
+ grpclog.Printf("grpc: addrConn.transportMonitor exits due to: %v", err)
+ return
+ }
+ }
+ }
+}
+
+// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed.
+func (ac *addrConn) wait(ctx context.Context) (transport.ClientTransport, error) {
+ for {
+ ac.mu.Lock()
+ switch {
+ case ac.state == Shutdown:
+ ac.mu.Unlock()
+ return nil, errConnClosing
+ case ac.state == Ready:
+ ct := ac.transport
+ ac.mu.Unlock()
+ return ct, nil
+ default:
+ ready := ac.ready
+ if ready == nil {
+ ready = make(chan struct{})
+ ac.ready = ready
+ }
+ ac.mu.Unlock()
+ select {
+ case <-ctx.Done():
+ return nil, transport.ContextErr(ctx.Err())
+ // Wait until the new transport is ready or failed.
+ case <-ready:
+ }
+ }
+ }
+}
+
+// tearDown starts to tear down the addrConn.
+// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in
+// some edge cases (e.g., the caller opens and closes many addrConn's in a
+// tight loop).
+func (ac *addrConn) tearDown(err error) {
+ ac.mu.Lock()
+ defer func() {
+ ac.mu.Unlock()
+ ac.cc.mu.Lock()
+ if ac.cc.conns != nil {
+ delete(ac.cc.conns, ac.addr)
+ }
+ ac.cc.mu.Unlock()
+ }()
+ if ac.state == Shutdown {
+ return
+ }
+ ac.state = Shutdown
+ if ac.down != nil {
+ ac.down(downErrorf(false, false, "%v", err))
+ ac.down = nil
+ }
+ ac.stateCV.Broadcast()
+ if ac.events != nil {
+ ac.events.Finish()
+ ac.events = nil
+ }
+ if ac.ready != nil {
+ close(ac.ready)
+ ac.ready = nil
+ }
+ if ac.transport != nil {
+ if err == errConnDrain {
+ ac.transport.GracefulClose()
+ } else {
+ ac.transport.Close()
+ }
+ }
+ if ac.shutdownChan != nil {
+ close(ac.shutdownChan)
+ }
+ return
+}
diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh
new file mode 100755
index 0000000..b009488
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codegen.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+
+# This script serves as an example to demonstrate how to generate the gRPC-Go
+# interface and the related messages from a .proto file.
+#
+# It assumes the installation of i) the Google protocol buffer compiler at
+# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen
+# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have
+# not installed them, please do so first.
+#
+# We recommend running this script at $GOPATH/src.
+#
+# If this is not what you need, feel free to make your own scripts. Again, this
+# script is for demonstration purposes only.
+#
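+# Example invocation (assuming helloworld.proto is in the current directory):
+#
+#   ./codegen.sh helloworld.proto
+#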
+proto=$1
+protoc --go_out=plugins=grpc:. "$proto"
diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go
new file mode 100644
index 0000000..e6762d0
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codes/code_string.go
@@ -0,0 +1,16 @@
+// generated by stringer -type=Code; DO NOT EDIT
+
+package codes
+
+import "fmt"
+
+const _Code_name = "OKCanceledUnknownInvalidArgumentDeadlineExceededNotFoundAlreadyExistsPermissionDeniedResourceExhaustedFailedPreconditionAbortedOutOfRangeUnimplementedInternalUnavailableDataLossUnauthenticated"
+
+var _Code_index = [...]uint8{0, 2, 10, 17, 32, 48, 56, 69, 85, 102, 120, 127, 137, 150, 158, 169, 177, 192}
+
+func (i Code) String() string {
+ if i+1 >= Code(len(_Code_index)) {
+ return fmt.Sprintf("Code(%d)", i)
+ }
+ return _Code_name[_Code_index[i]:_Code_index[i+1]]
+}
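+
+// For example, Code(5).String() returns "NotFound", while an out-of-range
+// value such as Code(99).String() falls through to the formatted form
+// "Code(99)".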
diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go
new file mode 100644
index 0000000..e14b464
--- /dev/null
+++ b/vendor/google.golang.org/grpc/codes/codes.go
@@ -0,0 +1,159 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package codes defines the canonical error codes used by gRPC. It is
+// consistent across various languages.
+package codes // import "google.golang.org/grpc/codes"
+
+// A Code is an unsigned 32-bit error code as defined in the gRPC spec.
+type Code uint32
+
+//go:generate stringer -type=Code
+
+const (
+ // OK is returned on success.
+ OK Code = 0
+
+ // Canceled indicates the operation was cancelled (typically by the caller).
+ Canceled Code = 1
+
+ // Unknown error. An example of where this error may be returned is
+ // if a Status value received from another address space belongs to
+ // an error-space that is not known in this address space. Also
+ // errors raised by APIs that do not return enough error information
+ // may be converted to this error.
+ Unknown Code = 2
+
+ // InvalidArgument indicates client specified an invalid argument.
+ // Note that this differs from FailedPrecondition. It indicates arguments
+ // that are problematic regardless of the state of the system
+ // (e.g., a malformed file name).
+ InvalidArgument Code = 3
+
+ // DeadlineExceeded means operation expired before completion.
+ // For operations that change the state of the system, this error may be
+ // returned even if the operation has completed successfully. For
+ // example, a successful response from a server could have been delayed
+ // long enough for the deadline to expire.
+ DeadlineExceeded Code = 4
+
+ // NotFound means some requested entity (e.g., file or directory) was
+ // not found.
+ NotFound Code = 5
+
+ // AlreadyExists means an attempt to create an entity failed because one
+ // already exists.
+ AlreadyExists Code = 6
+
+ // PermissionDenied indicates the caller does not have permission to
+ // execute the specified operation. It must not be used for rejections
+ // caused by exhausting some resource (use ResourceExhausted
+ // instead for those errors). It must not be
+ // used if the caller cannot be identified (use Unauthenticated
+ // instead for those errors).
+ PermissionDenied Code = 7
+
+ // Unauthenticated indicates the request does not have valid
+ // authentication credentials for the operation.
+ Unauthenticated Code = 16
+
+ // ResourceExhausted indicates some resource has been exhausted, perhaps
+ // a per-user quota, or perhaps the entire file system is out of space.
+ ResourceExhausted Code = 8
+
+ // FailedPrecondition indicates operation was rejected because the
+ // system is not in a state required for the operation's execution.
+ // For example, directory to be deleted may be non-empty, an rmdir
+ // operation is applied to a non-directory, etc.
+ //
+ // A litmus test that may help a service implementor in deciding
+ // between FailedPrecondition, Aborted, and Unavailable:
+ // (a) Use Unavailable if the client can retry just the failing call.
+ // (b) Use Aborted if the client should retry at a higher-level
+ // (e.g., restarting a read-modify-write sequence).
+ // (c) Use FailedPrecondition if the client should not retry until
+ // the system state has been explicitly fixed. E.g., if an "rmdir"
+ // fails because the directory is non-empty, FailedPrecondition
+ // should be returned since the client should not retry unless
+ // they have first fixed up the directory by deleting files from it.
+ // (d) Use FailedPrecondition if the client performs conditional
+ // REST Get/Update/Delete on a resource and the resource on the
+ // server does not match the condition. E.g., conflicting
+ // read-modify-write on the same resource.
+ FailedPrecondition Code = 9
+
+ // Aborted indicates the operation was aborted, typically due to a
+ // concurrency issue like sequencer check failures, transaction aborts,
+ // etc.
+ //
+ // See litmus test above for deciding between FailedPrecondition,
+ // Aborted, and Unavailable.
+ Aborted Code = 10
+
+ // OutOfRange means operation was attempted past the valid range.
+ // E.g., seeking or reading past end of file.
+ //
+ // Unlike InvalidArgument, this error indicates a problem that may
+ // be fixed if the system state changes. For example, a 32-bit file
+ // system will generate InvalidArgument if asked to read at an
+ // offset that is not in the range [0,2^32-1], but it will generate
+ // OutOfRange if asked to read from an offset past the current
+ // file size.
+ //
+ // There is a fair bit of overlap between FailedPrecondition and
+ // OutOfRange. We recommend using OutOfRange (the more specific
+ // error) when it applies so that callers who are iterating through
+ // a space can easily look for an OutOfRange error to detect when
+ // they are done.
+ OutOfRange Code = 11
+
+ // Unimplemented indicates operation is not implemented or not
+ // supported/enabled in this service.
+ Unimplemented Code = 12
+
+ // Internal errors. This means some invariant expected by the underlying
+ // system has been broken. If you see one of these errors,
+ // something is very broken.
+ Internal Code = 13
+
+ // Unavailable indicates the service is currently unavailable.
+ // This is most likely a transient condition and may be corrected
+ // by retrying with a backoff.
+ //
+ // See litmus test above for deciding between FailedPrecondition,
+ // Aborted, and Unavailable.
+ Unavailable Code = 14
+
+ // DataLoss indicates unrecoverable data loss or corruption.
+ DataLoss Code = 15
+)
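+
+// Illustrative usage (editor's sketch, not part of the upstream source; id is
+// a hypothetical variable):
+//
+//    err := grpc.Errorf(codes.NotFound, "user %q not found", id)
+//    if grpc.Code(err) == codes.NotFound {
+//        // handle the missing entity
+//    }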
diff --git a/vendor/google.golang.org/grpc/coverage.sh b/vendor/google.golang.org/grpc/coverage.sh
new file mode 100755
index 0000000..1202353
--- /dev/null
+++ b/vendor/google.golang.org/grpc/coverage.sh
@@ -0,0 +1,47 @@
+#!/bin/bash
+
+set -e
+
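+# Usage: ./coverage.sh [--html|--coveralls]
+#   (no argument)  print a per-function coverage summary
+#   --html         open the HTML coverage report
+#   --coveralls    push the profile to coveralls (requires goveralls)
+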
+workdir=.cover
+profile="$workdir/cover.out"
+mode=set
+end2endtest="google.golang.org/grpc/test"
+
+generate_cover_data() {
+ rm -rf "$workdir"
+ mkdir "$workdir"
+
+ for pkg in "$@"; do
+ if [ "$pkg" == "google.golang.org/grpc" -o "$pkg" == "google.golang.org/grpc/transport" -o "$pkg" == "google.golang.org/grpc/metadata" -o "$pkg" == "google.golang.org/grpc/credentials" ]
+ then
+ f="$workdir/$(echo $pkg | tr / -)"
+ go test -covermode="$mode" -coverprofile="$f.cover" "$pkg"
+ go test -covermode="$mode" -coverpkg "$pkg" -coverprofile="$f.e2e.cover" "$end2endtest"
+ fi
+ done
+
+ echo "mode: $mode" >"$profile"
+ grep -h -v "^mode:" "$workdir"/*.cover >>"$profile"
+}
+
+show_cover_report() {
+ go tool cover -${1}="$profile"
+}
+
+push_to_coveralls() {
+ goveralls -coverprofile="$profile"
+}
+
+generate_cover_data $(go list ./...)
+show_cover_report func
+case "$1" in
+"")
+ ;;
+--html)
+ show_cover_report html ;;
+--coveralls)
+ push_to_coveralls ;;
+*)
+ echo >&2 "error: invalid option: $1" ;;
+esac
+rm -rf "$workdir"
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
new file mode 100644
index 0000000..681f64e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -0,0 +1,226 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package credentials implements various credentials supported by the gRPC library,
+// which encapsulate all the state needed by a client to authenticate with a
+// server and make various assertions, e.g., about the client's identity, role,
+// or whether it is authorized to make a particular call.
+package credentials // import "google.golang.org/grpc/credentials"
+
+import (
+ "crypto/tls"
+ "crypto/x509"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "strings"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+var (
+ // alpnProtoStr are the specified application level protocols for gRPC.
+ alpnProtoStr = []string{"h2"}
+)
+
+// Credentials defines the common interface all supported credentials must
+// implement.
+type Credentials interface {
+ // GetRequestMetadata gets the current request metadata, refreshing
+ // tokens if required. This should be called by the transport layer on
+ // each request, and the data should be populated in headers or other
+ // context. uri is the URI of the entry point for the request. When
+ // supported by the underlying implementation, ctx can be used for
+ // timeout and cancellation.
+ // TODO(zhaoq): Define the set of the qualified keys instead of leaving
+ // it as an arbitrary string.
+ GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
+ // RequireTransportSecurity indicates whether the credentials require
+ // transport security.
+ RequireTransportSecurity() bool
+}
+
+// ProtocolInfo provides information regarding the gRPC wire protocol version,
+// security protocol, security protocol version in use, etc.
+type ProtocolInfo struct {
+ // ProtocolVersion is the gRPC wire protocol version.
+ ProtocolVersion string
+ // SecurityProtocol is the security protocol in use.
+ SecurityProtocol string
+ // SecurityVersion is the security protocol version.
+ SecurityVersion string
+}
+
+// AuthInfo defines the common interface for the auth information that users are interested in.
+type AuthInfo interface {
+ AuthType() string
+}
+
+// TransportAuthenticator defines the common interface for all the live gRPC wire
+// protocols and supported transport security protocols (e.g., TLS, SSL).
+type TransportAuthenticator interface {
+ // ClientHandshake does the authentication handshake specified by the corresponding
+ // authentication protocol on rawConn for clients. It returns the authenticated
+ // connection and the corresponding auth information about the connection.
+ ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (net.Conn, AuthInfo, error)
+ // ServerHandshake does the authentication handshake for servers. It returns
+ // the authenticated connection and the corresponding auth information about
+ // the connection.
+ ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
+ // Info provides the ProtocolInfo of this TransportAuthenticator.
+ Info() ProtocolInfo
+ Credentials
+}
+
+// TLSInfo contains the auth information for a TLS authenticated connection.
+// It implements the AuthInfo interface.
+type TLSInfo struct {
+ State tls.ConnectionState
+}
+
+func (t TLSInfo) AuthType() string {
+ return "tls"
+}
+
+// tlsCreds is the credentials required for authenticating a connection using TLS.
+type tlsCreds struct {
+ // TLS configuration
+ config tls.Config
+}
+
+func (c tlsCreds) Info() ProtocolInfo {
+ return ProtocolInfo{
+ SecurityProtocol: "tls",
+ SecurityVersion: "1.2",
+ }
+}
+
+// GetRequestMetadata returns nil, nil since TLS credentials do not have
+// metadata.
+func (c *tlsCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ return nil, nil
+}
+
+func (c *tlsCreds) RequireTransportSecurity() bool {
+ return true
+}
+
+type timeoutError struct{}
+
+func (timeoutError) Error() string { return "credentials: Dial timed out" }
+func (timeoutError) Timeout() bool { return true }
+func (timeoutError) Temporary() bool { return true }
+
+func (c *tlsCreds) ClientHandshake(addr string, rawConn net.Conn, timeout time.Duration) (_ net.Conn, _ AuthInfo, err error) {
+ // borrow some code from tls.DialWithDialer
+ var errChannel chan error
+ if timeout != 0 {
+ errChannel = make(chan error, 2)
+ time.AfterFunc(timeout, func() {
+ errChannel <- timeoutError{}
+ })
+ }
+ if c.config.ServerName == "" {
+ colonPos := strings.LastIndex(addr, ":")
+ if colonPos == -1 {
+ colonPos = len(addr)
+ }
+ c.config.ServerName = addr[:colonPos]
+ }
+ conn := tls.Client(rawConn, &c.config)
+ if timeout == 0 {
+ err = conn.Handshake()
+ } else {
+ go func() {
+ errChannel <- conn.Handshake()
+ }()
+ err = <-errChannel
+ }
+ if err != nil {
+ rawConn.Close()
+ return nil, nil, err
+ }
+ // TODO(zhaoq): Omit the auth info for client now. It is more for
+ // information than anything else.
+ return conn, nil, nil
+}
+
+func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) {
+ conn := tls.Server(rawConn, &c.config)
+ if err := conn.Handshake(); err != nil {
+ rawConn.Close()
+ return nil, nil, err
+ }
+ return conn, TLSInfo{conn.ConnectionState()}, nil
+}
+
+// NewTLS uses c to construct a TransportAuthenticator based on TLS.
+func NewTLS(c *tls.Config) TransportAuthenticator {
+ tc := &tlsCreds{*c}
+ tc.config.NextProtos = alpnProtoStr
+ return tc
+}
+
+// NewClientTLSFromCert constructs TLS credentials from the input certificate for clients.
+func NewClientTLSFromCert(cp *x509.CertPool, serverName string) TransportAuthenticator {
+ return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp})
+}
+
+// NewClientTLSFromFile constructs TLS credentials from the input certificate file for clients.
+func NewClientTLSFromFile(certFile, serverName string) (TransportAuthenticator, error) {
+ b, err := ioutil.ReadFile(certFile)
+ if err != nil {
+ return nil, err
+ }
+ cp := x509.NewCertPool()
+ if !cp.AppendCertsFromPEM(b) {
+ return nil, fmt.Errorf("credentials: failed to append certificates")
+ }
+ return NewTLS(&tls.Config{ServerName: serverName, RootCAs: cp}), nil
+}
+
+// NewServerTLSFromCert constructs TLS credentials from the input certificate for servers.
+func NewServerTLSFromCert(cert *tls.Certificate) TransportAuthenticator {
+ return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}})
+}
+
+// NewServerTLSFromFile constructs TLS credentials from the input certificate
+// file and key file for servers.
+func NewServerTLSFromFile(certFile, keyFile string) (TransportAuthenticator, error) {
+ cert, err := tls.LoadX509KeyPair(certFile, keyFile)
+ if err != nil {
+ return nil, err
+ }
+ return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil
+}
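+
+// Illustrative usage (editor's sketch, not part of the upstream source; file
+// names and the server name are placeholders):
+//
+//    creds, err := credentials.NewClientTLSFromFile("ca.pem", "server.example.com")
+//    if err != nil {
+//        grpclog.Fatalf("failed to load TLS credentials: %v", err)
+//    }
+//    conn, err := grpc.Dial("server.example.com:443", grpc.WithTransportCredentials(creds))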
diff --git a/vendor/google.golang.org/grpc/credentials/oauth/oauth.go b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
new file mode 100644
index 0000000..04943fd
--- /dev/null
+++ b/vendor/google.golang.org/grpc/credentials/oauth/oauth.go
@@ -0,0 +1,177 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package oauth implements gRPC credentials using OAuth.
+package oauth
+
+import (
+ "fmt"
+ "io/ioutil"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ "golang.org/x/oauth2/jwt"
+ "google.golang.org/grpc/credentials"
+)
+
+// TokenSource supplies credentials from an oauth2.TokenSource.
+type TokenSource struct {
+ oauth2.TokenSource
+}
+
+// GetRequestMetadata gets the request metadata as a map from a TokenSource.
+func (ts TokenSource) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ token, err := ts.Token()
+ if err != nil {
+ return nil, err
+ }
+ return map[string]string{
+ "authorization": token.TokenType + " " + token.AccessToken,
+ }, nil
+}
+
+func (ts TokenSource) RequireTransportSecurity() bool {
+ return true
+}
+
+type jwtAccess struct {
+ jsonKey []byte
+}
+
+func NewJWTAccessFromFile(keyFile string) (credentials.Credentials, error) {
+ jsonKey, err := ioutil.ReadFile(keyFile)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
+ }
+ return NewJWTAccessFromKey(jsonKey)
+}
+
+func NewJWTAccessFromKey(jsonKey []byte) (credentials.Credentials, error) {
+ return jwtAccess{jsonKey}, nil
+}
+
+func (j jwtAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ ts, err := google.JWTAccessTokenSourceFromJSON(j.jsonKey, uri[0])
+ if err != nil {
+ return nil, err
+ }
+ token, err := ts.Token()
+ if err != nil {
+ return nil, err
+ }
+ return map[string]string{
+ "authorization": token.TokenType + " " + token.AccessToken,
+ }, nil
+}
+
+func (j jwtAccess) RequireTransportSecurity() bool {
+ return true
+}
+
+// oauthAccess supplies credentials from a given token.
+type oauthAccess struct {
+ token oauth2.Token
+}
+
+// NewOauthAccess constructs the credentials using a given token.
+func NewOauthAccess(token *oauth2.Token) credentials.Credentials {
+ return oauthAccess{token: *token}
+}
+
+func (oa oauthAccess) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ return map[string]string{
+ "authorization": oa.token.TokenType + " " + oa.token.AccessToken,
+ }, nil
+}
+
+func (oa oauthAccess) RequireTransportSecurity() bool {
+ return true
+}
+
+// NewComputeEngine constructs the credentials that fetches access tokens from
+// Google Compute Engine (GCE)'s metadata server. It is only valid to use this
+// if your program is running on a GCE instance.
+// TODO(dsymonds): Deprecate and remove this.
+func NewComputeEngine() credentials.Credentials {
+ return TokenSource{google.ComputeTokenSource("")}
+}
+
+// serviceAccount represents credentials via JWT signing key.
+type serviceAccount struct {
+ config *jwt.Config
+}
+
+func (s serviceAccount) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
+ token, err := s.config.TokenSource(ctx).Token()
+ if err != nil {
+ return nil, err
+ }
+ return map[string]string{
+ "authorization": token.TokenType + " " + token.AccessToken,
+ }, nil
+}
+
+func (s serviceAccount) RequireTransportSecurity() bool {
+ return true
+}
+
+// NewServiceAccountFromKey constructs the credentials using the JSON key slice
+// from a Google Developers service account.
+func NewServiceAccountFromKey(jsonKey []byte, scope ...string) (credentials.Credentials, error) {
+ config, err := google.JWTConfigFromJSON(jsonKey, scope...)
+ if err != nil {
+ return nil, err
+ }
+ return serviceAccount{config: config}, nil
+}
+
+// NewServiceAccountFromFile constructs the credentials using the JSON key file
+// of a Google Developers service account.
+func NewServiceAccountFromFile(keyFile string, scope ...string) (credentials.Credentials, error) {
+ jsonKey, err := ioutil.ReadFile(keyFile)
+ if err != nil {
+ return nil, fmt.Errorf("credentials: failed to read the service account key file: %v", err)
+ }
+ return NewServiceAccountFromKey(jsonKey, scope...)
+}
+
+// NewApplicationDefault returns "Application Default Credentials". For more
+// detail, see https://developers.google.com/accounts/docs/application-default-credentials.
+func NewApplicationDefault(ctx context.Context, scope ...string) (credentials.Credentials, error) {
+ t, err := google.DefaultTokenSource(ctx, scope...)
+ if err != nil {
+ return nil, err
+ }
+ return TokenSource{t}, nil
+}
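+
+// Illustrative usage (editor's sketch, not part of the upstream source; the
+// key file, scope, target, and tlsCreds are placeholders):
+//
+//    perRPC, err := oauth.NewServiceAccountFromFile("service-account.json", "https://www.googleapis.com/auth/cloud-platform")
+//    if err != nil {
+//        grpclog.Fatalf("failed to create credentials: %v", err)
+//    }
+//    conn, err := grpc.Dial(target,
+//        grpc.WithPerRPCCredentials(perRPC),
+//        grpc.WithTransportCredentials(tlsCreds))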
diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go
new file mode 100644
index 0000000..a35f218
--- /dev/null
+++ b/vendor/google.golang.org/grpc/doc.go
@@ -0,0 +1,6 @@
+/*
+Package grpc implements an RPC system called gRPC.
+
+See www.grpc.io for more information about gRPC.
+*/
+package grpc // import "google.golang.org/grpc"
diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go
new file mode 100644
index 0000000..3b29330
--- /dev/null
+++ b/vendor/google.golang.org/grpc/grpclog/logger.go
@@ -0,0 +1,93 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+Package grpclog defines logging for grpc.
+*/
+package grpclog // import "google.golang.org/grpc/grpclog"
+
+import (
+ "log"
+ "os"
+)
+
+// Use golang's standard logger by default.
+// Access is not mutex-protected: do not modify except in init()
+// functions.
+var logger Logger = log.New(os.Stderr, "", log.LstdFlags)
+
+// Logger mimics golang's standard Logger as an interface.
+type Logger interface {
+ Fatal(args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Fatalln(args ...interface{})
+ Print(args ...interface{})
+ Printf(format string, args ...interface{})
+ Println(args ...interface{})
+}
+
+// SetLogger sets the logger that is used in grpc. Call only from
+// init() functions.
+func SetLogger(l Logger) {
+ logger = l
+}
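+
+// Illustrative usage (editor's sketch): replace the default logger from an
+// init function, as required above.
+//
+//    func init() {
+//        grpclog.SetLogger(log.New(os.Stderr, "[grpc] ", log.LstdFlags))
+//    }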
+
+// Fatal is equivalent to Print() followed by a call to os.Exit() with a non-zero exit code.
+func Fatal(args ...interface{}) {
+ logger.Fatal(args...)
+}
+
+// Fatalf is equivalent to Printf() followed by a call to os.Exit() with a non-zero exit code.
+func Fatalf(format string, args ...interface{}) {
+ logger.Fatalf(format, args...)
+}
+
+// Fatalln is equivalent to Println() followed by a call to os.Exit() with a non-zero exit code.
+func Fatalln(args ...interface{}) {
+ logger.Fatalln(args...)
+}
+
+// Print prints to the logger. Arguments are handled in the manner of fmt.Print.
+func Print(args ...interface{}) {
+ logger.Print(args...)
+}
+
+// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
+func Printf(format string, args ...interface{}) {
+ logger.Printf(format, args...)
+}
+
+// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
+func Println(args ...interface{}) {
+ logger.Println(args...)
+}
diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go
new file mode 100644
index 0000000..588f59e
--- /dev/null
+++ b/vendor/google.golang.org/grpc/interceptor.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "golang.org/x/net/context"
+)
+
+// UnaryServerInfo consists of various information about a unary RPC on the
+// server side. All per-rpc information may be mutated by the interceptor.
+type UnaryServerInfo struct {
+ // Server is the service implementation the user provides. This is read-only.
+ Server interface{}
+ // FullMethod is the full RPC method string, i.e., /package.service/method.
+ FullMethod string
+}
+
+// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
+// execution of a unary RPC.
+type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)
+
+// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
+// contains all the information about this RPC that the interceptor can operate on, and handler is the
+// wrapper of the service method implementation. It is the responsibility of the interceptor to invoke handler
+// to complete the RPC.
+type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error)
+
+// StreamServerInfo consists of various information about a streaming RPC on
+// the server side. All per-rpc information may be mutated by the interceptor.
+type StreamServerInfo struct {
+ // FullMethod is the full RPC method string, i.e., /package.service/method.
+ FullMethod string
+ // IsClientStream indicates whether the RPC is a client streaming RPC.
+ IsClientStream bool
+ // IsServerStream indicates whether the RPC is a server streaming RPC.
+ IsServerStream bool
+}
+
+// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server.
+// info contains all the information about this RPC that the interceptor can operate on, and handler is the
+// service method implementation. It is the responsibility of the interceptor to invoke handler to
+// complete the RPC.
+type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error
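+
+// Illustrative sketch of a UnaryServerInterceptor (editor's example, not part
+// of the upstream source): log every unary RPC, then delegate to handler.
+//
+//    func logging(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+//        grpclog.Printf("unary call: %s", info.FullMethod)
+//        return handler(ctx, req)
+//    }
+//
+// It would be installed with grpc.NewServer(grpc.UnaryInterceptor(logging)),
+// assuming the UnaryInterceptor ServerOption available in this snapshot.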
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
new file mode 100644
index 0000000..5489143
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -0,0 +1,49 @@
+/*
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package internal contains gRPC-internal code for testing, to avoid polluting
+// the godoc of the top-level grpc package.
+package internal
+
+// TestingCloseConns closes all existing transports but keeps
+// grpcServer.lis accepting new connections.
+//
+// The provided grpcServer must be of type *grpc.Server. It is untyped
+// for circular dependency reasons.
+var TestingCloseConns func(grpcServer interface{})
+
+// TestingUseHandlerImpl enables the http.Handler-based server implementation.
+// It must be called before Serve and requires TLS credentials.
+//
+// The provided grpcServer must be of type *grpc.Server. It is untyped
+// for circular dependency reasons.
+var TestingUseHandlerImpl func(grpcServer interface{})
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
new file mode 100644
index 0000000..52070db
--- /dev/null
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -0,0 +1,134 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package metadata defines the structure of the metadata supported by the gRPC library.
+package metadata // import "google.golang.org/grpc/metadata"
+
+import (
+ "encoding/base64"
+ "fmt"
+ "strings"
+
+ "golang.org/x/net/context"
+)
+
+const (
+ binHdrSuffix = "-bin"
+)
+
+// encodeKeyValue encodes key and value qualified for transmission via gRPC.
+// Transmitting binary headers violates the HTTP/2 spec.
+// TODO(zhaoq): Maybe check if k is ASCII also.
+func encodeKeyValue(k, v string) (string, string) {
+ k = strings.ToLower(k)
+ if strings.HasSuffix(k, binHdrSuffix) {
+ val := base64.StdEncoding.EncodeToString([]byte(v))
+ v = string(val)
+ }
+ return k, v
+}
+
+// DecodeKeyValue returns the original key and value corresponding to the
+// encoded data in k, v.
+func DecodeKeyValue(k, v string) (string, string, error) {
+ if !strings.HasSuffix(k, binHdrSuffix) {
+ return k, v, nil
+ }
+ val, err := base64.StdEncoding.DecodeString(v)
+ if err != nil {
+ return "", "", err
+ }
+ return k, string(val), nil
+}
+
+// MD is a mapping from metadata keys to values. Users should use the following
+// two convenience functions New and Pairs to generate MD.
+type MD map[string][]string
+
+// New creates a MD from given key-value map.
+func New(m map[string]string) MD {
+ md := MD{}
+ for k, v := range m {
+ key, val := encodeKeyValue(k, v)
+ md[key] = append(md[key], val)
+ }
+ return md
+}
+
+// Pairs returns an MD formed by the mapping of key, value ...
+// Pairs panics if len(kv) is odd.
+func Pairs(kv ...string) MD {
+ if len(kv)%2 == 1 {
+ panic(fmt.Sprintf("metadata: Pairs got an odd number of input pairs for metadata: %d", len(kv)))
+ }
+ md := MD{}
+ var k string
+ for i, s := range kv {
+ if i%2 == 0 {
+ k = s
+ continue
+ }
+ key, val := encodeKeyValue(k, s)
+ md[key] = append(md[key], val)
+ }
+ return md
+}
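+
+// For example, Pairs("K1", "v1", "k1", "v2", "k2-bin", string(data)) yields
+// MD{"k1": {"v1", "v2"}, "k2-bin": {base64 of data}}: keys are lowercased and
+// values of "-bin" keys are base64-encoded by encodeKeyValue above (data is a
+// hypothetical byte slice).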
+
+// Len returns the number of items in md.
+func (md MD) Len() int {
+ return len(md)
+}
+
+// Copy returns a copy of md.
+func (md MD) Copy() MD {
+ out := MD{}
+ for k, v := range md {
+ for _, i := range v {
+ out[k] = append(out[k], i)
+ }
+ }
+ return out
+}
+
+type mdKey struct{}
+
+// NewContext creates a new context with md attached.
+func NewContext(ctx context.Context, md MD) context.Context {
+ return context.WithValue(ctx, mdKey{}, md)
+}
+
+// FromContext returns the MD in ctx if it exists.
+func FromContext(ctx context.Context) (md MD, ok bool) {
+ md, ok = ctx.Value(mdKey{}).(MD)
+ return
+}
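+
+// Illustrative round trip (editor's sketch, not part of the upstream source):
+//
+//    md := metadata.New(map[string]string{"authorization": "Bearer <token>"})
+//    ctx := metadata.NewContext(context.Background(), md)
+//    if got, ok := metadata.FromContext(ctx); ok {
+//        // got["authorization"] == []string{"Bearer <token>"}
+//    }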
diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go
new file mode 100644
index 0000000..c2e0871
--- /dev/null
+++ b/vendor/google.golang.org/grpc/naming/naming.go
@@ -0,0 +1,74 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package naming defines the naming API and related data structures for gRPC.
+// The interface is EXPERIMENTAL and may be subject to change.
+package naming
+
+// Operation defines the corresponding operations for a name resolution change.
+type Operation uint8
+
+const (
+ // Add indicates a new address is added.
+ Add Operation = iota
+ // Delete indicates an existing address is deleted.
+ Delete
+)
+
+// Update defines a name resolution update. Note that it is not valid to have
+// both an empty string Addr and nil Metadata in an Update.
+type Update struct {
+ // Op indicates the operation of the update.
+ Op Operation
+ // Addr is the updated address. It is empty string if there is no address update.
+ Addr string
+ // Metadata is the updated metadata. It is nil if there is no metadata update.
+ // Metadata is not required for a custom naming implementation.
+ Metadata interface{}
+}
+
+// Resolver creates a Watcher for a target to track its resolution changes.
+type Resolver interface {
+ // Resolve creates a Watcher for target.
+ Resolve(target string) (Watcher, error)
+}
+
+// Watcher watches for the updates on the specified target.
+type Watcher interface {
+ // Next blocks until an update or error happens. It may return one or more
+ // updates. The first call should get the full set of the results. It should
+ // return an error if and only if Watcher cannot recover.
+ Next() ([]*Update, error)
+ // Close closes the Watcher.
+ Close()
+}
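+
+// Illustrative consumption of a Watcher (editor's sketch, not part of the
+// upstream source; r is any Resolver implementation):
+//
+//    w, err := r.Resolve("my-service")
+//    if err != nil {
+//        return err
+//    }
+//    for {
+//        updates, err := w.Next()
+//        if err != nil {
+//            return err // the Watcher cannot recover
+//        }
+//        for _, u := range updates {
+//            switch u.Op {
+//            case naming.Add: // start using u.Addr
+//            case naming.Delete: // stop using u.Addr
+//            }
+//        }
+//    }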
diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go
new file mode 100644
index 0000000..bfa6205
--- /dev/null
+++ b/vendor/google.golang.org/grpc/peer/peer.go
@@ -0,0 +1,65 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// Package peer defines various peer information associated with RPCs and
+// corresponding utilities.
+package peer
+
+import (
+ "net"
+
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/credentials"
+)
+
+// Peer contains the information of the peer for an RPC.
+type Peer struct {
+ // Addr is the peer address.
+ Addr net.Addr
+ // AuthInfo is the authentication information of the transport.
+ // It is nil if there is no transport security being used.
+ AuthInfo credentials.AuthInfo
+}
+
+type peerKey struct{}
+
+// NewContext creates a new context with peer information attached.
+func NewContext(ctx context.Context, p *Peer) context.Context {
+ return context.WithValue(ctx, peerKey{}, p)
+}
+
+// FromContext returns the peer information in ctx if it exists.
+func FromContext(ctx context.Context) (p *Peer, ok bool) {
+ p, ok = ctx.Value(peerKey{}).(*Peer)
+ return
+}
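+
+// Illustrative usage inside a server handler (editor's sketch, not part of
+// the upstream source):
+//
+//    if p, ok := peer.FromContext(ctx); ok {
+//        grpclog.Printf("request from %v (auth: %v)", p.Addr, p.AuthInfo)
+//    }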
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
new file mode 100644
index 0000000..06544ad
--- /dev/null
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -0,0 +1,415 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "bytes"
+ "compress/gzip"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "os"
+
+ "github.com/golang/protobuf/proto"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/transport"
+)
+
+// Codec defines the interface gRPC uses to encode and decode messages.
+type Codec interface {
+ // Marshal returns the wire format of v.
+ Marshal(v interface{}) ([]byte, error)
+ // Unmarshal parses the wire format into v.
+ Unmarshal(data []byte, v interface{}) error
+ // String returns the name of the Codec implementation. The returned
+ // string will be used as part of content type in transmission.
+ String() string
+}
+
+// protoCodec is a Codec implementation with protobuf. It is the default codec for gRPC.
+type protoCodec struct{}
+
+func (protoCodec) Marshal(v interface{}) ([]byte, error) {
+ return proto.Marshal(v.(proto.Message))
+}
+
+func (protoCodec) Unmarshal(data []byte, v interface{}) error {
+ return proto.Unmarshal(data, v.(proto.Message))
+}
+
+func (protoCodec) String() string {
+ return "proto"
+}
+
+// Compressor defines the interface gRPC uses to compress a message.
+type Compressor interface {
+ // Do compresses p into w.
+ Do(w io.Writer, p []byte) error
+ // Type returns the compression algorithm the Compressor uses.
+ Type() string
+}
+
+// NewGZIPCompressor creates a Compressor based on GZIP.
+func NewGZIPCompressor() Compressor {
+ return &gzipCompressor{}
+}
+
+type gzipCompressor struct {
+}
+
+func (c *gzipCompressor) Do(w io.Writer, p []byte) error {
+ z := gzip.NewWriter(w)
+ if _, err := z.Write(p); err != nil {
+ return err
+ }
+ return z.Close()
+}
+
+func (c *gzipCompressor) Type() string {
+ return "gzip"
+}
+
+// Decompressor defines the interface gRPC uses to decompress a message.
+type Decompressor interface {
+ // Do reads the data from r and uncompresses it.
+ Do(r io.Reader) ([]byte, error)
+ // Type returns the compression algorithm the Decompressor uses.
+ Type() string
+}
+
+type gzipDecompressor struct {
+}
+
+// NewGZIPDecompressor creates a Decompressor based on GZIP.
+func NewGZIPDecompressor() Decompressor {
+ return &gzipDecompressor{}
+}
+
+func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) {
+ z, err := gzip.NewReader(r)
+ if err != nil {
+ return nil, err
+ }
+ defer z.Close()
+ return ioutil.ReadAll(z)
+}
+
+func (d *gzipDecompressor) Type() string {
+ return "gzip"
+}
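+
+// Illustrative wiring (editor's sketch, assuming the WithCompressor and
+// WithDecompressor DialOptions of this snapshot; target is a placeholder):
+//
+//    conn, err := grpc.Dial(target,
+//        grpc.WithCompressor(grpc.NewGZIPCompressor()),
+//        grpc.WithDecompressor(grpc.NewGZIPDecompressor()))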
+
+// callInfo contains all related configuration and information about an RPC.
+type callInfo struct {
+ failFast bool
+ headerMD metadata.MD
+ trailerMD metadata.MD
+ traceInfo traceInfo // in trace.go
+}
+
+// CallOption configures a Call before it starts or extracts information from
+// a Call after it completes.
+type CallOption interface {
+ // before is called before the call is sent to any server. If before
+ // returns a non-nil error, the RPC fails with that error.
+ before(*callInfo) error
+
+ // after is called after the call has completed. after cannot return an
+ // error, so any failures should be reported via output parameters.
+ after(*callInfo)
+}
+
+type beforeCall func(c *callInfo) error
+
+func (o beforeCall) before(c *callInfo) error { return o(c) }
+func (o beforeCall) after(c *callInfo) {}
+
+type afterCall func(c *callInfo)
+
+func (o afterCall) before(c *callInfo) error { return nil }
+func (o afterCall) after(c *callInfo) { o(c) }
+
+// Header returns a CallOption that retrieves the header metadata
+// for a unary RPC.
+func Header(md *metadata.MD) CallOption {
+ return afterCall(func(c *callInfo) {
+ *md = c.headerMD
+ })
+}
+
+// Trailer returns a CallOption that retrieves the trailer metadata
+// for a unary RPC.
+func Trailer(md *metadata.MD) CallOption {
+ return afterCall(func(c *callInfo) {
+ *md = c.trailerMD
+ })
+}
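+
+// Illustrative usage (editor's sketch; client and GetFeature are hypothetical
+// generated-stub names):
+//
+//    var header, trailer metadata.MD
+//    resp, err := client.GetFeature(ctx, req, grpc.Header(&header), grpc.Trailer(&trailer))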
+
+// The format of the payload: compressed or not?
+type payloadFormat uint8
+
+const (
+ compressionNone payloadFormat = iota // no compression
+ compressionMade
+)
+
+// parser reads complete gRPC messages from the underlying reader.
+type parser struct {
+ // r is the underlying reader.
+ // See the comment on recvMsg for the permissible
+ // error types.
+ r io.Reader
+
+ // The header of a gRPC message. Find more detail
+ // at http://www.grpc.io/docs/guides/wire.html.
+ header [5]byte
+}
+
+// recvMsg reads a complete gRPC message from the stream.
+//
+// It returns the message and its payload (compression/encoding)
+// format. The caller owns the returned msg memory.
+//
+// If there is an error, possible values are:
+// * io.EOF, when no messages remain
+// * io.ErrUnexpectedEOF
+// * of type transport.ConnectionError
+// * of type transport.StreamError
+// No other error values or types may be returned, which also means
+// that the underlying io.Reader must not return an incompatible
+// error.
+func (p *parser) recvMsg() (pf payloadFormat, msg []byte, err error) {
+ if _, err := io.ReadFull(p.r, p.header[:]); err != nil {
+ return 0, nil, err
+ }
+
+ pf = payloadFormat(p.header[0])
+ length := binary.BigEndian.Uint32(p.header[1:])
+
+ if length == 0 {
+ return pf, nil, nil
+ }
+ // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead
+ // of making it for each message:
+ msg = make([]byte, int(length))
+ if _, err := io.ReadFull(p.r, msg); err != nil {
+ if err == io.EOF {
+ err = io.ErrUnexpectedEOF
+ }
+ return 0, nil, err
+ }
+ return pf, msg, nil
+}
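+
+// For reference, the framing read by recvMsg and produced by encode below
+// is (per http://www.grpc.io/docs/guides/wire.html):
+//
+//	1 byte  payload format (compressionNone or compressionMade)
+//	4 bytes big-endian message length
+//	N bytes message payload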
+
+// encode serializes msg and prepends the message header. If msg is nil, it
+// generates a message header with a message length of 0.
+func encode(c Codec, msg interface{}, cp Compressor, cbuf *bytes.Buffer) ([]byte, error) {
+ var b []byte
+ var length uint
+ if msg != nil {
+ var err error
+ // TODO(zhaoq): optimize to reduce memory alloc and copying.
+ b, err = c.Marshal(msg)
+ if err != nil {
+ return nil, err
+ }
+ if cp != nil {
+ if err := cp.Do(cbuf, b); err != nil {
+ return nil, err
+ }
+ b = cbuf.Bytes()
+ }
+ length = uint(len(b))
+ }
+ if length > math.MaxUint32 {
+ return nil, Errorf(codes.InvalidArgument, "grpc: message too large (%d bytes)", length)
+ }
+
+ const (
+ payloadLen = 1
+ sizeLen = 4
+ )
+
+ var buf = make([]byte, payloadLen+sizeLen+len(b))
+
+ // Write payload format
+ if cp == nil {
+ buf[0] = byte(compressionNone)
+ } else {
+ buf[0] = byte(compressionMade)
+ }
+ // Write length of b into buf
+ binary.BigEndian.PutUint32(buf[1:], uint32(length))
+ // Copy encoded msg to buf
+ copy(buf[5:], b)
+
+ return buf, nil
+}
+
+func checkRecvPayload(pf payloadFormat, recvCompress string, dc Decompressor) error {
+ switch pf {
+ case compressionNone:
+ case compressionMade:
+ if dc == nil || recvCompress != dc.Type() {
+ return transport.StreamErrorf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
+ }
+ default:
+ return transport.StreamErrorf(codes.Internal, "grpc: received unexpected payload format %d", pf)
+ }
+ return nil
+}
+
+func recv(p *parser, c Codec, s *transport.Stream, dc Decompressor, m interface{}) error {
+ pf, d, err := p.recvMsg()
+ if err != nil {
+ return err
+ }
+ if err := checkRecvPayload(pf, s.RecvCompress(), dc); err != nil {
+ return err
+ }
+ if pf == compressionMade {
+ d, err = dc.Do(bytes.NewReader(d))
+ if err != nil {
+ return transport.StreamErrorf(codes.Internal, "grpc: failed to decompress the received message %v", err)
+ }
+ }
+ if err := c.Unmarshal(d, m); err != nil {
+ return transport.StreamErrorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err)
+ }
+ return nil
+}
+
+// rpcError defines the status from an RPC.
+type rpcError struct {
+ code codes.Code
+ desc string
+}
+
+func (e rpcError) Error() string {
+ return fmt.Sprintf("rpc error: code = %d desc = %s", e.code, e.desc)
+}
+
+// Code returns the error code for err if it was produced by the rpc system.
+// Otherwise, it returns codes.Unknown.
+func Code(err error) codes.Code {
+ if err == nil {
+ return codes.OK
+ }
+ if e, ok := err.(rpcError); ok {
+ return e.code
+ }
+ return codes.Unknown
+}
+
+// ErrorDesc returns the error description of err if it was produced by the rpc system.
+// Otherwise, it returns err.Error() or empty string when err is nil.
+func ErrorDesc(err error) string {
+ if err == nil {
+ return ""
+ }
+ if e, ok := err.(rpcError); ok {
+ return e.desc
+ }
+ return err.Error()
+}
+
+// Errorf returns an error containing an error code and a description;
+// Errorf returns nil if c is OK.
+func Errorf(c codes.Code, format string, a ...interface{}) error {
+ if c == codes.OK {
+ return nil
+ }
+ return rpcError{
+ code: c,
+ desc: fmt.Sprintf(format, a...),
+ }
+}
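+
+// Example (editor's sketch) of how these helpers round-trip:
+//
+//	err := Errorf(codes.NotFound, "no such user %q", "alice")
+//	Code(err)      // codes.NotFound
+//	ErrorDesc(err) // no such user "alice"
+//	Errorf(codes.OK, "ignored") // nil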
+
+// toRPCErr converts an error into an rpcError.
+func toRPCErr(err error) error {
+ switch e := err.(type) {
+ case rpcError:
+ return err
+ case transport.StreamError:
+ return rpcError{
+ code: e.Code,
+ desc: e.Desc,
+ }
+ case transport.ConnectionError:
+ return rpcError{
+ code: codes.Internal,
+ desc: e.Desc,
+ }
+ }
+ return Errorf(codes.Unknown, "%v", err)
+}
+
+// convertCode converts a standard Go error into its canonical code. Note that
+// this is only used to translate errors returned by server applications.
+func convertCode(err error) codes.Code {
+ switch err {
+ case nil:
+ return codes.OK
+ case io.EOF:
+ return codes.OutOfRange
+ case io.ErrClosedPipe, io.ErrNoProgress, io.ErrShortBuffer, io.ErrShortWrite, io.ErrUnexpectedEOF:
+ return codes.FailedPrecondition
+ case os.ErrInvalid:
+ return codes.InvalidArgument
+ case context.Canceled:
+ return codes.Canceled
+ case context.DeadlineExceeded:
+ return codes.DeadlineExceeded
+ }
+ switch {
+ case os.IsExist(err):
+ return codes.AlreadyExists
+ case os.IsNotExist(err):
+ return codes.NotFound
+ case os.IsPermission(err):
+ return codes.PermissionDenied
+ }
+ return codes.Unknown
+}
+
+// SupportPackageIsVersion2 is referenced from generated protocol buffer files
+// to assert that the generated code is compatible with this version of the grpc package.
+//
+// This constant may be renamed in the future if a change in the generated code
+// requires a synchronised update of grpc-go and protoc-gen-go. This constant
+// should not be referenced from any other code.
+const SupportPackageIsVersion2 = true
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
new file mode 100644
index 0000000..bfb9c60
--- /dev/null
+++ b/vendor/google.golang.org/grpc/server.go
@@ -0,0 +1,787 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "reflect"
+ "runtime"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/transport"
+)
+
+type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error)
+
+// MethodDesc represents an RPC service's method specification.
+type MethodDesc struct {
+ MethodName string
+ Handler methodHandler
+}
+
+// ServiceDesc represents an RPC service's specification.
+type ServiceDesc struct {
+ ServiceName string
+ // The pointer to the service interface. Used to check whether the
+ // user-provided implementation satisfies the interface requirements.
+ HandlerType interface{}
+ Methods []MethodDesc
+ Streams []StreamDesc
+}
+
+// service contains the server that implements this service and the
+// descriptors of its methods and streams.
+type service struct {
+ server interface{} // the server for service methods
+ md map[string]*MethodDesc
+ sd map[string]*StreamDesc
+}
+
+// Server is a gRPC server to serve RPC requests.
+type Server struct {
+ opts options
+
+ mu sync.Mutex // guards following
+ lis map[net.Listener]bool
+ conns map[io.Closer]bool
+ m map[string]*service // service name -> service info
+ events trace.EventLog
+}
+
+type options struct {
+ creds credentials.Credentials
+ codec Codec
+ cp Compressor
+ dc Decompressor
+ unaryInt UnaryServerInterceptor
+ streamInt StreamServerInterceptor
+ maxConcurrentStreams uint32
+ useHandlerImpl bool // use http.Handler-based server
+}
+
+// A ServerOption sets options.
+type ServerOption func(*options)
+
+// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling.
+func CustomCodec(codec Codec) ServerOption {
+ return func(o *options) {
+ o.codec = codec
+ }
+}
+
+// RPCCompressor returns a ServerOption that sets a compressor for outbound messages.
+func RPCCompressor(cp Compressor) ServerOption {
+ return func(o *options) {
+ o.cp = cp
+ }
+}
+
+// RPCDecompressor returns a ServerOption that sets a decompressor for inbound messages.
+func RPCDecompressor(dc Decompressor) ServerOption {
+ return func(o *options) {
+ o.dc = dc
+ }
+}
+
+// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number
+// of concurrent streams to each ServerTransport.
+func MaxConcurrentStreams(n uint32) ServerOption {
+ return func(o *options) {
+ o.maxConcurrentStreams = n
+ }
+}
+
+// Creds returns a ServerOption that sets credentials for server connections.
+func Creds(c credentials.Credentials) ServerOption {
+ return func(o *options) {
+ o.creds = c
+ }
+}
+
+// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the
+// server. Only one unary interceptor can be installed. The construction of multiple
+// interceptors (e.g., chaining) can be implemented by the caller.
+func UnaryInterceptor(i UnaryServerInterceptor) ServerOption {
+ return func(o *options) {
+ if o.unaryInt != nil {
+ panic("The unary server interceptor has been set.")
+ }
+ o.unaryInt = i
+ }
+}
+
+// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the
+// server. Only one stream interceptor can be installed.
+func StreamInterceptor(i StreamServerInterceptor) ServerOption {
+ return func(o *options) {
+ if o.streamInt != nil {
+ panic("The stream server interceptor has been set.")
+ }
+ o.streamInt = i
+ }
+}
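+
+// Example (editor's sketch): a logging interceptor, assuming the
+// UnaryServerInterceptor and UnaryHandler signatures from interceptor.go;
+// the log format is illustrative:
+//
+//	logger := func(ctx context.Context, req interface{},
+//		info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
+//		start := time.Now()
+//		resp, err := handler(ctx, req)
+//		grpclog.Printf("%s took %v err=%v", info.FullMethod, time.Since(start), err)
+//		return resp, err
+//	}
+//	s := grpc.NewServer(grpc.UnaryInterceptor(logger))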
+
+// NewServer creates a gRPC server which has no service registered and has not
+// started to accept requests yet.
+func NewServer(opt ...ServerOption) *Server {
+ var opts options
+ for _, o := range opt {
+ o(&opts)
+ }
+ if opts.codec == nil {
+ // Set the default codec.
+ opts.codec = protoCodec{}
+ }
+ s := &Server{
+ lis: make(map[net.Listener]bool),
+ opts: opts,
+ conns: make(map[io.Closer]bool),
+ m: make(map[string]*service),
+ }
+ if EnableTracing {
+ _, file, line, _ := runtime.Caller(1)
+ s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line))
+ }
+ return s
+}
+
+// printf records an event in s's event log, unless s has been stopped.
+// REQUIRES s.mu is held.
+func (s *Server) printf(format string, a ...interface{}) {
+ if s.events != nil {
+ s.events.Printf(format, a...)
+ }
+}
+
+// errorf records an error in s's event log, unless s has been stopped.
+// REQUIRES s.mu is held.
+func (s *Server) errorf(format string, a ...interface{}) {
+ if s.events != nil {
+ s.events.Errorf(format, a...)
+ }
+}
+
+// RegisterService registers a service and its implementation with the gRPC
+// server. It is called from the IDL-generated code. This must be called
+// before invoking Serve.
+func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) {
+ ht := reflect.TypeOf(sd.HandlerType).Elem()
+ st := reflect.TypeOf(ss)
+ if !st.Implements(ht) {
+ grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht)
+ }
+ s.register(sd, ss)
+}
+
+func (s *Server) register(sd *ServiceDesc, ss interface{}) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.printf("RegisterService(%q)", sd.ServiceName)
+ if _, ok := s.m[sd.ServiceName]; ok {
+ grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName)
+ }
+ srv := &service{
+ server: ss,
+ md: make(map[string]*MethodDesc),
+ sd: make(map[string]*StreamDesc),
+ }
+ for i := range sd.Methods {
+ d := &sd.Methods[i]
+ srv.md[d.MethodName] = d
+ }
+ for i := range sd.Streams {
+ d := &sd.Streams[i]
+ srv.sd[d.StreamName] = d
+ }
+ s.m[sd.ServiceName] = srv
+}
+
+var (
+ // ErrServerStopped indicates that the operation is now illegal because of
+ // the server being stopped.
+ ErrServerStopped = errors.New("grpc: the server has been stopped")
+)
+
+func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) {
+ creds, ok := s.opts.creds.(credentials.TransportAuthenticator)
+ if !ok {
+ return rawConn, nil, nil
+ }
+ return creds.ServerHandshake(rawConn)
+}
+
+// Serve accepts incoming connections on the listener lis, creating a new
+// ServerTransport and service goroutine for each. The service goroutines
+// read gRPC requests and then call the registered handlers to reply to them.
+// Serve returns when lis.Accept fails. lis will be closed when
+// this method returns.
+func (s *Server) Serve(lis net.Listener) error {
+ s.mu.Lock()
+ s.printf("serving")
+ if s.lis == nil {
+ s.mu.Unlock()
+ lis.Close()
+ return ErrServerStopped
+ }
+ s.lis[lis] = true
+ s.mu.Unlock()
+ defer func() {
+ lis.Close()
+ s.mu.Lock()
+ delete(s.lis, lis)
+ s.mu.Unlock()
+ }()
+ for {
+ rawConn, err := lis.Accept()
+ if err != nil {
+ s.mu.Lock()
+ s.printf("done serving; Accept = %v", err)
+ s.mu.Unlock()
+ return err
+ }
+ // Start a new goroutine to deal with rawConn
+ // so we don't stall this Accept loop goroutine.
+ go s.handleRawConn(rawConn)
+ }
+}
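+
+// Example (editor's sketch): typical server startup; pb.RegisterFooServer
+// and fooServer are placeholders for IDL-generated registration code and a
+// user implementation:
+//
+//	lis, err := net.Listen("tcp", ":50051")
+//	if err != nil {
+//		grpclog.Fatalf("failed to listen: %v", err)
+//	}
+//	s := grpc.NewServer()
+//	pb.RegisterFooServer(s, &fooServer{})
+//	if err := s.Serve(lis); err != nil {
+//		grpclog.Fatalf("failed to serve: %v", err)
+//	}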
+
+// handleRawConn is run in its own goroutine and handles a just-accepted
+// connection that has not had any I/O performed on it yet.
+func (s *Server) handleRawConn(rawConn net.Conn) {
+ conn, authInfo, err := s.useTransportAuthenticator(rawConn)
+ if err != nil {
+ s.mu.Lock()
+ s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err)
+ s.mu.Unlock()
+ grpclog.Printf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err)
+ rawConn.Close()
+ return
+ }
+
+ s.mu.Lock()
+ if s.conns == nil {
+ s.mu.Unlock()
+ conn.Close()
+ return
+ }
+ s.mu.Unlock()
+
+ if s.opts.useHandlerImpl {
+ s.serveUsingHandler(conn)
+ } else {
+ s.serveNewHTTP2Transport(conn, authInfo)
+ }
+}
+
+// serveNewHTTP2Transport sets up a new http/2 transport (using the
+// gRPC http2 server transport in transport/http2_server.go) and
+// serves streams on it.
+// This is run in its own goroutine (it does network I/O in
+// transport.NewServerTransport).
+func (s *Server) serveNewHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) {
+ st, err := transport.NewServerTransport("http2", c, s.opts.maxConcurrentStreams, authInfo)
+ if err != nil {
+ s.mu.Lock()
+ s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err)
+ s.mu.Unlock()
+ c.Close()
+ grpclog.Println("grpc: Server.Serve failed to create ServerTransport: ", err)
+ return
+ }
+ if !s.addConn(st) {
+ st.Close()
+ return
+ }
+ s.serveStreams(st)
+}
+
+func (s *Server) serveStreams(st transport.ServerTransport) {
+ defer s.removeConn(st)
+ defer st.Close()
+ var wg sync.WaitGroup
+ st.HandleStreams(func(stream *transport.Stream) {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ s.handleStream(st, stream, s.traceInfo(st, stream))
+ }()
+ })
+ wg.Wait()
+}
+
+var _ http.Handler = (*Server)(nil)
+
+// serveUsingHandler is called from handleRawConn when s is configured
+// to handle requests via the http.Handler interface. It sets up a
+// net/http.Server to handle the just-accepted conn. The http.Server
+// is configured to route all incoming requests (all HTTP/2 streams)
+// to ServeHTTP, which creates a new ServerTransport for each stream.
+// serveUsingHandler blocks until conn closes.
+//
+// This codepath is only used when Server.TestingUseHandlerImpl has
+// been configured. This lets the end2end tests exercise the ServeHTTP
+// method as one of the environment types.
+//
+// conn is the *tls.Conn that's already been authenticated.
+func (s *Server) serveUsingHandler(conn net.Conn) {
+ if !s.addConn(conn) {
+ conn.Close()
+ return
+ }
+ defer s.removeConn(conn)
+ h2s := &http2.Server{
+ MaxConcurrentStreams: s.opts.maxConcurrentStreams,
+ }
+ h2s.ServeConn(conn, &http2.ServeConnOpts{
+ Handler: s,
+ })
+}
+
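+// ServeHTTP implements the http.Handler interface: it wraps the incoming
+// request in a ServerHandlerTransport and serves the resulting stream.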
+func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ st, err := transport.NewServerHandlerTransport(w, r)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ if !s.addConn(st) {
+ st.Close()
+ return
+ }
+ defer s.removeConn(st)
+ s.serveStreams(st)
+}
+
+// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled.
+// If tracing is not enabled, it returns nil.
+func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) {
+ if !EnableTracing {
+ return nil
+ }
+ trInfo = &traceInfo{
+ tr: trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()),
+ }
+ trInfo.firstLine.client = false
+ trInfo.firstLine.remoteAddr = st.RemoteAddr()
+ stream.TraceContext(trInfo.tr)
+ if dl, ok := stream.Context().Deadline(); ok {
+ trInfo.firstLine.deadline = dl.Sub(time.Now())
+ }
+ return trInfo
+}
+
+func (s *Server) addConn(c io.Closer) bool {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.conns == nil {
+ return false
+ }
+ s.conns[c] = true
+ return true
+}
+
+func (s *Server) removeConn(c io.Closer) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.conns != nil {
+ delete(s.conns, c)
+ }
+}
+
+func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options) error {
+ var cbuf *bytes.Buffer
+ if cp != nil {
+ cbuf = new(bytes.Buffer)
+ }
+ p, err := encode(s.opts.codec, msg, cp, cbuf)
+ if err != nil {
+ // This typically indicates a fatal issue (e.g., memory
+ // corruption or hardware faults) the application program
+ // cannot handle.
+ //
+ // TODO(zhaoq): There exist other options also such as only closing the
+ // faulty stream locally and remotely (Other streams can keep going). Find
+ // the optimal option.
+ grpclog.Fatalf("grpc: Server failed to encode response %v", err)
+ }
+ return t.Write(stream, p, opts)
+}
+
+func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) {
+ if trInfo != nil {
+ defer trInfo.tr.Finish()
+ trInfo.firstLine.client = false
+ trInfo.tr.LazyLog(&trInfo.firstLine, false)
+ defer func() {
+ if err != nil && err != io.EOF {
+ trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ trInfo.tr.SetError()
+ }
+ }()
+ }
+ if s.opts.cp != nil {
+ // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686.
+ stream.SetSendCompress(s.opts.cp.Type())
+ }
+ p := &parser{r: stream}
+ for {
+ pf, req, err := p.recvMsg()
+ if err == io.EOF {
+ // The entire stream is done (for unary RPC only).
+ return err
+ }
+ if err == io.ErrUnexpectedEOF {
+ err = transport.StreamError{Code: codes.Internal, Desc: "io.ErrUnexpectedEOF"}
+ }
+ if err != nil {
+ switch err := err.(type) {
+ case transport.ConnectionError:
+ // Nothing to do here.
+ case transport.StreamError:
+ if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil {
+ grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
+ }
+ default:
+ panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", err, err))
+ }
+ return err
+ }
+
+ if err := checkRecvPayload(pf, stream.RecvCompress(), s.opts.dc); err != nil {
+ switch err := err.(type) {
+ case transport.StreamError:
+ if err := t.WriteStatus(stream, err.Code, err.Desc); err != nil {
+ grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
+ }
+ default:
+ if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
+ grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
+ }
+ }
+ return err
+ }
+ statusCode := codes.OK
+ statusDesc := ""
+ df := func(v interface{}) error {
+ if pf == compressionMade {
+ var err error
+ req, err = s.opts.dc.Do(bytes.NewReader(req))
+ if err != nil {
+ if err := t.WriteStatus(stream, codes.Internal, err.Error()); err != nil {
+ grpclog.Printf("grpc: Server.processUnaryRPC failed to write status %v", err)
+ }
+ return err
+ }
+ }
+ if err := s.opts.codec.Unmarshal(req, v); err != nil {
+ return err
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true)
+ }
+ return nil
+ }
+ reply, appErr := md.Handler(srv.server, stream.Context(), df, s.opts.unaryInt)
+ if appErr != nil {
+ if err, ok := appErr.(rpcError); ok {
+ statusCode = err.code
+ statusDesc = err.desc
+ } else {
+ statusCode = convertCode(appErr)
+ statusDesc = appErr.Error()
+ }
+ if trInfo != nil && statusCode != codes.OK {
+ trInfo.tr.LazyLog(stringer(statusDesc), true)
+ trInfo.tr.SetError()
+ }
+ if err := t.WriteStatus(stream, statusCode, statusDesc); err != nil {
+ grpclog.Printf("grpc: Server.processUnaryRPC failed to write status: %v", err)
+ return err
+ }
+ return nil
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(stringer("OK"), false)
+ }
+ opts := &transport.Options{
+ Last: true,
+ Delay: false,
+ }
+ if err := s.sendResponse(t, stream, reply, s.opts.cp, opts); err != nil {
+ switch err := err.(type) {
+ case transport.ConnectionError:
+ // Nothing to do here.
+ case transport.StreamError:
+ statusCode = err.Code
+ statusDesc = err.Desc
+ default:
+ statusCode = codes.Unknown
+ statusDesc = err.Error()
+ }
+ return err
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true)
+ }
+ return t.WriteStatus(stream, statusCode, statusDesc)
+ }
+}
+
+func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) {
+ if s.opts.cp != nil {
+ stream.SetSendCompress(s.opts.cp.Type())
+ }
+ ss := &serverStream{
+ t: t,
+ s: stream,
+ p: &parser{r: stream},
+ codec: s.opts.codec,
+ cp: s.opts.cp,
+ dc: s.opts.dc,
+ trInfo: trInfo,
+ }
+ if ss.cp != nil {
+ ss.cbuf = new(bytes.Buffer)
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&trInfo.firstLine, false)
+ defer func() {
+ ss.mu.Lock()
+ if err != nil && err != io.EOF {
+ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ ss.trInfo.tr.SetError()
+ }
+ ss.trInfo.tr.Finish()
+ ss.trInfo.tr = nil
+ ss.mu.Unlock()
+ }()
+ }
+ var appErr error
+ if s.opts.streamInt == nil {
+ appErr = sd.Handler(srv.server, ss)
+ } else {
+ info := &StreamServerInfo{
+ FullMethod: stream.Method(),
+ IsClientStream: sd.ClientStreams,
+ IsServerStream: sd.ServerStreams,
+ }
+ appErr = s.opts.streamInt(srv.server, ss, info, sd.Handler)
+ }
+ if appErr != nil {
+ if err, ok := appErr.(rpcError); ok {
+ ss.statusCode = err.code
+ ss.statusDesc = err.desc
+ } else if err, ok := appErr.(transport.StreamError); ok {
+ ss.statusCode = err.Code
+ ss.statusDesc = err.Desc
+ } else {
+ ss.statusCode = convertCode(appErr)
+ ss.statusDesc = appErr.Error()
+ }
+ }
+ if trInfo != nil {
+ ss.mu.Lock()
+ if ss.statusCode != codes.OK {
+ ss.trInfo.tr.LazyLog(stringer(ss.statusDesc), true)
+ ss.trInfo.tr.SetError()
+ } else {
+ ss.trInfo.tr.LazyLog(stringer("OK"), false)
+ }
+ ss.mu.Unlock()
+ }
+ return t.WriteStatus(ss.s, ss.statusCode, ss.statusDesc)
+}
+
+func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) {
+ sm := stream.Method()
+ if sm != "" && sm[0] == '/' {
+ sm = sm[1:]
+ }
+ pos := strings.LastIndex(sm, "/")
+ if pos == -1 {
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true)
+ trInfo.tr.SetError()
+ }
+ if err := t.WriteStatus(stream, codes.InvalidArgument, fmt.Sprintf("malformed method name: %q", stream.Method())); err != nil {
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ trInfo.tr.SetError()
+ }
+ grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
+ }
+ if trInfo != nil {
+ trInfo.tr.Finish()
+ }
+ return
+ }
+ service := sm[:pos]
+ method := sm[pos+1:]
+ srv, ok := s.m[service]
+ if !ok {
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true)
+ trInfo.tr.SetError()
+ }
+ if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown service %v", service)); err != nil {
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ trInfo.tr.SetError()
+ }
+ grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
+ }
+ if trInfo != nil {
+ trInfo.tr.Finish()
+ }
+ return
+ }
+ // Unary RPC or Streaming RPC?
+ if md, ok := srv.md[method]; ok {
+ s.processUnaryRPC(t, stream, srv, md, trInfo)
+ return
+ }
+ if sd, ok := srv.sd[method]; ok {
+ s.processStreamingRPC(t, stream, srv, sd, trInfo)
+ return
+ }
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true)
+ trInfo.tr.SetError()
+ }
+ if err := t.WriteStatus(stream, codes.Unimplemented, fmt.Sprintf("unknown method %v", method)); err != nil {
+ if trInfo != nil {
+ trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ trInfo.tr.SetError()
+ }
+ grpclog.Printf("grpc: Server.handleStream failed to write status: %v", err)
+ }
+ if trInfo != nil {
+ trInfo.tr.Finish()
+ }
+}
+
+// Stop stops the gRPC server. It immediately closes all open
+// connections and listeners.
+// It cancels all active RPCs on the server side; the corresponding
+// pending RPCs on the client side will be notified by connection
+// errors.
+func (s *Server) Stop() {
+ s.mu.Lock()
+ listeners := s.lis
+ s.lis = nil
+ cs := s.conns
+ s.conns = nil
+ s.mu.Unlock()
+
+ for lis := range listeners {
+ lis.Close()
+ }
+ for c := range cs {
+ c.Close()
+ }
+
+ s.mu.Lock()
+ if s.events != nil {
+ s.events.Finish()
+ s.events = nil
+ }
+ s.mu.Unlock()
+}
+
+func init() {
+ internal.TestingCloseConns = func(arg interface{}) {
+ arg.(*Server).testingCloseConns()
+ }
+ internal.TestingUseHandlerImpl = func(arg interface{}) {
+ arg.(*Server).opts.useHandlerImpl = true
+ }
+}
+
+// testingCloseConns closes all existing transports but keeps s.lis
+// accepting new connections.
+func (s *Server) testingCloseConns() {
+ s.mu.Lock()
+ for c := range s.conns {
+ c.Close()
+ delete(s.conns, c)
+ }
+ s.mu.Unlock()
+}
+
+// SendHeader sends header metadata. It may be called at most once from a unary
+// RPC handler. The ctx is the RPC handler's Context or one derived from it.
+func SendHeader(ctx context.Context, md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ stream, ok := transport.StreamFromContext(ctx)
+ if !ok {
+ return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx)
+ }
+ t := stream.ServerTransport()
+ if t == nil {
+ grpclog.Fatalf("grpc: SendHeader: %v has no ServerTransport to send header metadata.", stream)
+ }
+ return t.WriteHeader(stream, md)
+}
+
+// SetTrailer sets the trailer metadata that will be sent when an RPC returns.
+// It may be called at most once from a unary RPC handler. The ctx is the RPC
+// handler's Context or one derived from it.
+func SetTrailer(ctx context.Context, md metadata.MD) error {
+ if md.Len() == 0 {
+ return nil
+ }
+ stream, ok := transport.StreamFromContext(ctx)
+ if !ok {
+ return fmt.Errorf("grpc: failed to fetch the stream from the context %v", ctx)
+ }
+ return stream.SetTrailer(md)
+}
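+
+// Example (editor's sketch): a unary handler using both helpers; the
+// handler shape and pb types are placeholders:
+//
+//	func (s *fooServer) GetFoo(ctx context.Context, in *pb.Request) (*pb.Reply, error) {
+//		if err := grpc.SendHeader(ctx, metadata.Pairs("key", "val")); err != nil {
+//			return nil, err
+//		}
+//		if err := grpc.SetTrailer(ctx, metadata.Pairs("elapsed", "5ms")); err != nil {
+//			return nil, err
+//		}
+//		return &pb.Reply{}, nil
+//	}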
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
new file mode 100644
index 0000000..de125d5
--- /dev/null
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -0,0 +1,424 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/transport"
+)
+
+// StreamHandler defines the handler called by the gRPC server to complete
+// the execution of a streaming RPC.
+type StreamHandler func(srv interface{}, stream ServerStream) error
+
+// StreamDesc represents a streaming RPC service's method specification.
+type StreamDesc struct {
+ StreamName string
+ Handler StreamHandler
+
+ // At least one of these is true.
+ ServerStreams bool
+ ClientStreams bool
+}
+
+// Stream defines the common interface a client or server stream has to satisfy.
+type Stream interface {
+ // Context returns the context for this stream.
+ Context() context.Context
+ // SendMsg blocks until it sends m, the stream is done, or the stream
+ // breaks.
+ // On error, it aborts the stream and returns an RPC status on the client
+ // side. On the server side, it simply returns the error to the caller.
+ // SendMsg is called by generated code. Users can also call SendMsg
+ // directly when their use case requires it.
+ SendMsg(m interface{}) error
+ // RecvMsg blocks until it receives a message or the stream is
+ // done. On the client side, it returns io.EOF when the stream is done. On
+ // any other error, it aborts the stream and returns an RPC status. On
+ // the server side, it simply returns the error to the caller.
+ RecvMsg(m interface{}) error
+}
+
+// ClientStream defines the interface a client stream has to satisfy.
+type ClientStream interface {
+ // Header returns the header metadata received from the server if there
+ // is any. It blocks if the metadata is not ready to read.
+ Header() (metadata.MD, error)
+ // Trailer returns the trailer metadata from the server. It must be called
+ // after stream.Recv() returns a non-nil error (including io.EOF) for
+ // bi-directional and server streaming, or after stream.CloseAndRecv()
+ // returns for client streaming, in order to receive the trailer metadata
+ // if present. Otherwise, it could return an empty MD even though a
+ // trailer is present.
+ Trailer() metadata.MD
+ // CloseSend closes the send direction of the stream. It closes the stream
+ // when a non-nil error is encountered.
+ CloseSend() error
+ Stream
+}
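+
+// Example (editor's sketch): driving a bi-directional stream through the
+// generic interface; generated code normally wraps SendMsg/RecvMsg in
+// typed helpers. desc, conn, msg and reply are placeholders:
+//
+//	stream, err := grpc.NewClientStream(ctx, desc, conn, "/pkg.Service/Chat")
+//	if err != nil {
+//		return err
+//	}
+//	if err := stream.SendMsg(msg); err != nil {
+//		return err
+//	}
+//	if err := stream.CloseSend(); err != nil {
+//		return err
+//	}
+//	for {
+//		if err := stream.RecvMsg(reply); err != nil {
+//			break // io.EOF indicates a clean end of the stream
+//		}
+//	}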
+
+// NewClientStream creates a new Stream for the client side. This is called
+// by generated code.
+func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) {
+ var (
+ t transport.ClientTransport
+ err error
+ put func()
+ )
+ // TODO(zhaoq): CallOption is omitted. Add support when it is needed.
+ gopts := BalancerGetOptions{
+ BlockingWait: false,
+ }
+ t, put, err = cc.getTransport(ctx, gopts)
+ if err != nil {
+ return nil, toRPCErr(err)
+ }
+ callHdr := &transport.CallHdr{
+ Host: cc.authority,
+ Method: method,
+ Flush: desc.ServerStreams && desc.ClientStreams,
+ }
+ if cc.dopts.cp != nil {
+ callHdr.SendCompress = cc.dopts.cp.Type()
+ }
+ cs := &clientStream{
+ desc: desc,
+ put: put,
+ codec: cc.dopts.codec,
+ cp: cc.dopts.cp,
+ dc: cc.dopts.dc,
+ tracing: EnableTracing,
+ }
+ if cc.dopts.cp != nil {
+ callHdr.SendCompress = cc.dopts.cp.Type()
+ cs.cbuf = new(bytes.Buffer)
+ }
+ if cs.tracing {
+ cs.trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method)
+ cs.trInfo.firstLine.client = true
+ if deadline, ok := ctx.Deadline(); ok {
+ cs.trInfo.firstLine.deadline = deadline.Sub(time.Now())
+ }
+ cs.trInfo.tr.LazyLog(&cs.trInfo.firstLine, false)
+ ctx = trace.NewContext(ctx, cs.trInfo.tr)
+ }
+ s, err := t.NewStream(ctx, callHdr)
+ if err != nil {
+ cs.finish(err)
+ return nil, toRPCErr(err)
+ }
+ cs.t = t
+ cs.s = s
+ cs.p = &parser{r: s}
+ // Listen on ctx.Done() to detect cancellation when there are no pending
+ // I/O operations on this stream.
+ go func() {
+ select {
+ case <-t.Error():
+ // A transport error occurred; simply exit.
+ case <-s.Context().Done():
+ err := s.Context().Err()
+ cs.finish(err)
+ cs.closeTransportStream(transport.ContextErr(err))
+ }
+ }()
+ return cs, nil
+}
+
+// clientStream implements a client side Stream.
+type clientStream struct {
+ t transport.ClientTransport
+ s *transport.Stream
+ p *parser
+ desc *StreamDesc
+ codec Codec
+ cp Compressor
+ cbuf *bytes.Buffer
+ dc Decompressor
+
+ tracing bool // set to EnableTracing when the clientStream is created.
+
+ mu sync.Mutex
+ put func()
+ closed bool
+ // trInfo.tr is set when the clientStream is created (if EnableTracing is true),
+ // and is set to nil when the clientStream's finish method is called.
+ trInfo traceInfo
+}
+
+func (cs *clientStream) Context() context.Context {
+ return cs.s.Context()
+}
+
+func (cs *clientStream) Header() (metadata.MD, error) {
+ m, err := cs.s.Header()
+ if err != nil {
+ if _, ok := err.(transport.ConnectionError); !ok {
+ cs.closeTransportStream(err)
+ }
+ }
+ return m, err
+}
+
+func (cs *clientStream) Trailer() metadata.MD {
+ return cs.s.Trailer()
+}
+
+func (cs *clientStream) SendMsg(m interface{}) (err error) {
+ if cs.tracing {
+ cs.mu.Lock()
+ if cs.trInfo.tr != nil {
+ cs.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+ }
+ cs.mu.Unlock()
+ }
+ defer func() {
+ if err != nil {
+ cs.finish(err)
+ }
+ if err == nil || err == io.EOF {
+ return
+ }
+ if _, ok := err.(transport.ConnectionError); !ok {
+ cs.closeTransportStream(err)
+ }
+ err = toRPCErr(err)
+ }()
+ out, err := encode(cs.codec, m, cs.cp, cs.cbuf)
+ defer func() {
+ if cs.cbuf != nil {
+ cs.cbuf.Reset()
+ }
+ }()
+ if err != nil {
+ return transport.StreamErrorf(codes.Internal, "grpc: %v", err)
+ }
+ return cs.t.Write(cs.s, out, &transport.Options{Last: false})
+}
+
+func (cs *clientStream) RecvMsg(m interface{}) (err error) {
+ err = recv(cs.p, cs.codec, cs.s, cs.dc, m)
+ defer func() {
+ // err != nil indicates the termination of the stream.
+ if err != nil {
+ cs.finish(err)
+ }
+ }()
+ if err == nil {
+ if cs.tracing {
+ cs.mu.Lock()
+ if cs.trInfo.tr != nil {
+ cs.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+ }
+ cs.mu.Unlock()
+ }
+ if !cs.desc.ClientStreams || cs.desc.ServerStreams {
+ return
+ }
+ // Special handling for client streaming rpc.
+ err = recv(cs.p, cs.codec, cs.s, cs.dc, m)
+ cs.closeTransportStream(err)
+ if err == nil {
+ return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+ }
+ if err == io.EOF {
+ if cs.s.StatusCode() == codes.OK {
+ cs.finish(err)
+ return nil
+ }
+ return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
+ }
+ return toRPCErr(err)
+ }
+ if _, ok := err.(transport.ConnectionError); !ok {
+ cs.closeTransportStream(err)
+ }
+ if err == io.EOF {
+ if cs.s.StatusCode() == codes.OK {
+ // Returns io.EOF to indicate the end of the stream.
+ return
+ }
+ return Errorf(cs.s.StatusCode(), "%s", cs.s.StatusDesc())
+ }
+ return toRPCErr(err)
+}
+
+func (cs *clientStream) CloseSend() (err error) {
+ err = cs.t.Write(cs.s, nil, &transport.Options{Last: true})
+ defer func() {
+ if err != nil {
+ cs.finish(err)
+ }
+ }()
+ if err == nil || err == io.EOF {
+ return
+ }
+ if _, ok := err.(transport.ConnectionError); !ok {
+ cs.closeTransportStream(err)
+ }
+ err = toRPCErr(err)
+ return
+}
+
+func (cs *clientStream) closeTransportStream(err error) {
+ cs.mu.Lock()
+ if cs.closed {
+ cs.mu.Unlock()
+ return
+ }
+ cs.closed = true
+ cs.mu.Unlock()
+ cs.t.CloseStream(cs.s, err)
+}
+
+func (cs *clientStream) finish(err error) {
+ if !cs.tracing {
+ return
+ }
+ cs.mu.Lock()
+ defer cs.mu.Unlock()
+ if cs.put != nil {
+ cs.put()
+ cs.put = nil
+ }
+ if cs.trInfo.tr != nil {
+ if err == nil || err == io.EOF {
+ cs.trInfo.tr.LazyPrintf("RPC: [OK]")
+ } else {
+ cs.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+ cs.trInfo.tr.SetError()
+ }
+ cs.trInfo.tr.Finish()
+ cs.trInfo.tr = nil
+ }
+}
+
+// ServerStream defines the interface a server stream has to satisfy.
+type ServerStream interface {
+ // SendHeader sends the header metadata. It should not be called
+ // after SendMsg. It fails if called multiple times or if
+ // called after SendMsg.
+ SendHeader(metadata.MD) error
+ // SetTrailer sets the trailer metadata which will be sent with the
+ // RPC status.
+ SetTrailer(metadata.MD)
+ Stream
+}
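+
+// Example (editor's sketch): an echo-style StreamHandler built on this
+// interface; pb.Msg is a placeholder message type:
+//
+//	func echo(srv interface{}, stream grpc.ServerStream) error {
+//		for {
+//			m := new(pb.Msg)
+//			if err := stream.RecvMsg(m); err != nil {
+//				if err == io.EOF {
+//					return nil // the client finished sending
+//				}
+//				return err
+//			}
+//			if err := stream.SendMsg(m); err != nil {
+//				return err
+//			}
+//		}
+//	}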
+
+// serverStream implements a server side Stream.
+type serverStream struct {
+ t transport.ServerTransport
+ s *transport.Stream
+ p *parser
+ codec Codec
+ cp Compressor
+ dc Decompressor
+ cbuf *bytes.Buffer
+ statusCode codes.Code
+ statusDesc string
+ trInfo *traceInfo
+
+ mu sync.Mutex // protects trInfo.tr after the service handler runs.
+}
+
+func (ss *serverStream) Context() context.Context {
+ return ss.s.Context()
+}
+
+func (ss *serverStream) SendHeader(md metadata.MD) error {
+ return ss.t.WriteHeader(ss.s, md)
+}
+
+func (ss *serverStream) SetTrailer(md metadata.MD) {
+ if md.Len() == 0 {
+ return
+ }
+ ss.s.SetTrailer(md)
+}
+
+func (ss *serverStream) SendMsg(m interface{}) (err error) {
+ defer func() {
+ if ss.trInfo != nil {
+ ss.mu.Lock()
+ if ss.trInfo.tr != nil {
+ if err == nil {
+ ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true)
+ } else {
+ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ ss.trInfo.tr.SetError()
+ }
+ }
+ ss.mu.Unlock()
+ }
+ }()
+ out, err := encode(ss.codec, m, ss.cp, ss.cbuf)
+ defer func() {
+ if ss.cbuf != nil {
+ ss.cbuf.Reset()
+ }
+ }()
+ if err != nil {
+ err = transport.StreamErrorf(codes.Internal, "grpc: %v", err)
+ return err
+ }
+ return ss.t.Write(ss.s, out, &transport.Options{Last: false})
+}
+
+func (ss *serverStream) RecvMsg(m interface{}) (err error) {
+ defer func() {
+ if ss.trInfo != nil {
+ ss.mu.Lock()
+ if ss.trInfo.tr != nil {
+ if err == nil {
+ ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true)
+ } else if err != io.EOF {
+ ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true)
+ ss.trInfo.tr.SetError()
+ }
+ }
+ ss.mu.Unlock()
+ }
+ }()
+ return recv(ss.p, ss.codec, ss.s, ss.dc, m)
+}
diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go
new file mode 100644
index 0000000..f6747e1
--- /dev/null
+++ b/vendor/google.golang.org/grpc/trace.go
@@ -0,0 +1,119 @@
+/*
+ *
+ * Copyright 2015, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package grpc
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net"
+ "strings"
+ "time"
+
+ "golang.org/x/net/trace"
+)
+
+// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package.
+// This should only be set before any RPCs are sent or received by this program.
+var EnableTracing = true
+
+// methodFamily returns the trace family for the given method.
+// It turns "/pkg.Service/GetFoo" into "Service".
+func methodFamily(m string) string {
+ m = strings.TrimPrefix(m, "/") // remove leading slash
+ if i := strings.Index(m, "/"); i >= 0 {
+ m = m[:i] // remove everything from second slash
+ }
+ if i := strings.LastIndex(m, "."); i >= 0 {
+ m = m[i+1:] // cut down to last dotted component
+ }
+ return m
+}
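+
+// For example, methodFamily("/pkg.Service/GetFoo") yields "pkg.Service"
+// after the slash handling and "Service" after the final dotted-component
+// cut.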
+
+// traceInfo contains tracing information for an RPC.
+type traceInfo struct {
+ tr trace.Trace
+ firstLine firstLine
+}
+
+// firstLine is the first line of an RPC trace.
+type firstLine struct {
+ client bool // whether this is a client (outgoing) RPC
+ remoteAddr net.Addr
+ deadline time.Duration // may be zero
+}
+
+func (f *firstLine) String() string {
+ var line bytes.Buffer
+ io.WriteString(&line, "RPC: ")
+ if f.client {
+ io.WriteString(&line, "to")
+ } else {
+ io.WriteString(&line, "from")
+ }
+ fmt.Fprintf(&line, " %v deadline:", f.remoteAddr)
+ if f.deadline != 0 {
+ fmt.Fprint(&line, f.deadline)
+ } else {
+ io.WriteString(&line, "none")
+ }
+ return line.String()
+}
+
+// payload represents an RPC request or response payload.
+type payload struct {
+ sent bool // whether this is an outgoing payload
+ msg interface{} // e.g. a proto.Message
+ // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here?
+}
+
+func (p payload) String() string {
+ if p.sent {
+ return fmt.Sprintf("sent: %v", p.msg)
+ }
+ return fmt.Sprintf("recv: %v", p.msg)
+}
+
+type fmtStringer struct {
+ format string
+ a []interface{}
+}
+
+func (f *fmtStringer) String() string {
+ return fmt.Sprintf(f.format, f.a...)
+}
+
+type stringer string
+
+func (s stringer) String() string { return string(s) }
diff --git a/vendor/google.golang.org/grpc/transport/control.go b/vendor/google.golang.org/grpc/transport/control.go
new file mode 100644
index 0000000..7e9bdf3
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/control.go
@@ -0,0 +1,210 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+ "fmt"
+ "sync"
+
+ "golang.org/x/net/http2"
+)
+
+const (
+ // The default value of flow control window size in HTTP2 spec.
+ defaultWindowSize = 65535
+ // The initial window size for flow control.
+ initialWindowSize = defaultWindowSize // for an RPC
+ initialConnWindowSize = defaultWindowSize * 16 // for a connection
+)
+
+// The following defines various control items which can flow through
+// the control buffer of the transport. They represent different aspects of
+// control tasks, e.g., flow control, settings, stream resetting, etc.
+type windowUpdate struct {
+ streamID uint32
+ increment uint32
+}
+
+func (*windowUpdate) item() {}
+
+type settings struct {
+ ack bool
+ ss []http2.Setting
+}
+
+func (*settings) item() {}
+
+type resetStream struct {
+ streamID uint32
+ code http2.ErrCode
+}
+
+func (*resetStream) item() {}
+
+type flushIO struct {
+}
+
+func (*flushIO) item() {}
+
+type ping struct {
+ ack bool
+ data [8]byte
+}
+
+func (*ping) item() {}
+
+// quotaPool is a pool which accumulates quota and sends it on the channel
+// returned by acquire() when quota becomes available.
+type quotaPool struct {
+ c chan int
+
+ mu sync.Mutex
+ quota int
+}
+
+// newQuotaPool creates a quotaPool which has quota q available to consume.
+func newQuotaPool(q int) *quotaPool {
+ qb := &quotaPool{
+ c: make(chan int, 1),
+ }
+ if q > 0 {
+ qb.c <- q
+ } else {
+ qb.quota = q
+ }
+ return qb
+}
+
+// add adds n to the available quota and tries to send it on acquire.
+func (qb *quotaPool) add(n int) {
+ qb.mu.Lock()
+ defer qb.mu.Unlock()
+ qb.quota += n
+ if qb.quota <= 0 {
+ return
+ }
+ select {
+ case qb.c <- qb.quota:
+ qb.quota = 0
+ default:
+ }
+}
+
+// cancel cancels the pending quota sent on acquire, if any.
+func (qb *quotaPool) cancel() {
+ qb.mu.Lock()
+ defer qb.mu.Unlock()
+ select {
+ case n := <-qb.c:
+ qb.quota += n
+ default:
+ }
+}
+
+// reset cancels the pending quota sent on acquire, adds v to the available
+// quota, and sends the total back on acquire.
+func (qb *quotaPool) reset(v int) {
+ qb.mu.Lock()
+ defer qb.mu.Unlock()
+ select {
+ case n := <-qb.c:
+ qb.quota += n
+ default:
+ }
+ qb.quota += v
+ if qb.quota <= 0 {
+ return
+ }
+ select {
+ case qb.c <- qb.quota:
+ qb.quota = 0
+ default:
+ }
+}
+
+// acquire returns the channel on which available quota amounts are sent.
+func (qb *quotaPool) acquire() <-chan int {
+ return qb.c
+}
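+
+// Example (editor's sketch) of the intended protocol: a waiter takes all
+// available quota and returns whatever it does not use:
+//
+//	q := newQuotaPool(1000)
+//	n := <-q.acquire() // receives 1000; the pool is now empty
+//	used := 300        // consume part of the quota
+//	q.add(n - used)    // hand the unused 700 back for the next waiter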
+
+// inFlow deals with inbound flow control
+type inFlow struct {
+ // The inbound flow control limit for pending data.
+ limit uint32
+
+ mu sync.Mutex
+ // pendingData is the overall data which has been received but not yet
+ // consumed by the application.
+ pendingData uint32
+ // pendingUpdate is the amount of data the application has consumed but
+ // for which grpc has not yet sent a window update. Used to reduce window
+ // update frequency.
+ pendingUpdate uint32
+}
+
+// onData is invoked when a data frame is received. It updates pendingData.
+func (f *inFlow) onData(n uint32) error {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ f.pendingData += n
+ if f.pendingData+f.pendingUpdate > f.limit {
+ return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", f.pendingData+f.pendingUpdate, f.limit)
+ }
+ return nil
+}
+
+// onRead is invoked when the application reads the data. It returns the window size
+// to be sent to the peer.
+func (f *inFlow) onRead(n uint32) uint32 {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ if f.pendingData == 0 {
+ return 0
+ }
+ f.pendingData -= n
+ f.pendingUpdate += n
+ if f.pendingUpdate >= f.limit/4 {
+ wu := f.pendingUpdate
+ f.pendingUpdate = 0
+ return wu
+ }
+ return 0
+}
+
+func (f *inFlow) resetPendingData() uint32 {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ n := f.pendingData
+ f.pendingData = 0
+ return n
+}
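+
+// Worked example (editor's note): with limit = 65535, onRead accumulates
+// consumed bytes in pendingUpdate and returns 0 until pendingUpdate reaches
+// limit/4 (16383 bytes); only then does it return a window update,
+// batching many small reads into a single WINDOW_UPDATE frame.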
diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go
new file mode 100644
index 0000000..7a4ae07
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/handler_server.go
@@ -0,0 +1,393 @@
+/*
+ * Copyright 2016, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+// This file is the implementation of a gRPC server using HTTP/2 which
+// uses the standard Go http2 Server implementation (via the
+// http.Handler interface), rather than speaking low-level HTTP/2
+// frames itself. It is the implementation of *grpc.Server.ServeHTTP.
+
+package transport
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/http2"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+)
+
+// NewServerHandlerTransport returns a ServerTransport handling gRPC
+// from inside an http.Handler. It requires that the http Server
+// supports HTTP/2.
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request) (ServerTransport, error) {
+ if r.ProtoMajor != 2 {
+ return nil, errors.New("gRPC requires HTTP/2")
+ }
+ if r.Method != "POST" {
+ return nil, errors.New("invalid gRPC request method")
+ }
+ if !strings.Contains(r.Header.Get("Content-Type"), "application/grpc") {
+ return nil, errors.New("invalid gRPC request content-type")
+ }
+ if _, ok := w.(http.Flusher); !ok {
+ return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher")
+ }
+ if _, ok := w.(http.CloseNotifier); !ok {
+ return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier")
+ }
+
+ st := &serverHandlerTransport{
+ rw: w,
+ req: r,
+ closedCh: make(chan struct{}),
+ writes: make(chan func()),
+ }
+
+ if v := r.Header.Get("grpc-timeout"); v != "" {
+ to, err := timeoutDecode(v)
+ if err != nil {
+ return nil, StreamErrorf(codes.Internal, "malformed time-out: %v", err)
+ }
+ st.timeoutSet = true
+ st.timeout = to
+ }
+
+ var metakv []string
+ if r.Host != "" {
+ metakv = append(metakv, ":authority", r.Host)
+ }
+ for k, vv := range r.Header {
+ k = strings.ToLower(k)
+ if isReservedHeader(k) && !isWhitelistedPseudoHeader(k) {
+ continue
+ }
+ for _, v := range vv {
+ if k == "user-agent" {
+ // user-agent is special; this mirrors the logic in http_util.go.
+ if i := strings.LastIndex(v, " "); i == -1 {
+ // There is no application user agent string being set
+ continue
+ } else {
+ v = v[:i]
+ }
+ }
+ metakv = append(metakv, k, v)
+ }
+ }
+ st.headerMD = metadata.Pairs(metakv...)
+
+ return st, nil
+}
+
+// serverHandlerTransport is an implementation of ServerTransport
+// which replies to exactly one gRPC request (exactly one HTTP request),
+// using the net/http.Handler interface. This http.Handler is guaranteed
+// at this point to be speaking over HTTP/2, so it's able to speak valid
+// gRPC.
+type serverHandlerTransport struct {
+ rw http.ResponseWriter
+ req *http.Request
+ timeoutSet bool
+ timeout time.Duration
+ didCommonHeaders bool
+
+ headerMD metadata.MD
+
+ closeOnce sync.Once
+ closedCh chan struct{} // closed on Close
+
+ // writes is a channel of code to run serialized in the
+ // ServeHTTP (HandleStreams) goroutine. The channel is closed
+ // when WriteStatus is called.
+ writes chan func()
+}
+
+func (ht *serverHandlerTransport) Close() error {
+ ht.closeOnce.Do(ht.closeCloseChanOnce)
+ return nil
+}
+
+func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) }
+
+func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) }
+
+// strAddr is a net.Addr backed by either a TCP "ip:port" string, or
+// the empty string if unknown.
+type strAddr string
+
+func (a strAddr) Network() string {
+ if a != "" {
+ // Per the documentation on net/http.Request.RemoteAddr, if this is
+ // set, it's set to the IP:port of the peer (hence, TCP):
+ // https://golang.org/pkg/net/http/#Request
+ //
+ // If we want to support Unix sockets later, we can
+ // add our own grpc-specific convention within the
+ // grpc codebase to set RemoteAddr to a different
+ // format, or probably better: we can attach it to the
+ // context and use that from serverHandlerTransport.RemoteAddr.
+ return "tcp"
+ }
+ return ""
+}
+
+func (a strAddr) String() string { return string(a) }
+
+// do runs fn in the ServeHTTP goroutine.
+func (ht *serverHandlerTransport) do(fn func()) error {
+ select {
+ case ht.writes <- fn:
+ return nil
+ case <-ht.closedCh:
+ return ErrConnClosing
+ }
+}
+
+func (ht *serverHandlerTransport) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
+ err := ht.do(func() {
+ ht.writeCommonHeaders(s)
+
+ // And flush, in case no header or body has been sent yet.
+ // This forces a separation of headers and trailers if this is the
+ // first call (for example, in the end2end test TestNoService).
+ ht.rw.(http.Flusher).Flush()
+
+ h := ht.rw.Header()
+ h.Set("Grpc-Status", fmt.Sprintf("%d", statusCode))
+ if statusDesc != "" {
+ h.Set("Grpc-Message", statusDesc)
+ }
+ if md := s.Trailer(); len(md) > 0 {
+ for k, vv := range md {
+ // Clients don't tolerate reading restricted headers after some non-restricted ones were sent.
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, v := range vv {
+ // http2 ResponseWriter mechanism to
+ // send undeclared Trailers after the
+ // headers have possibly been written.
+ h.Add(http2.TrailerPrefix+k, v)
+ }
+ }
+ }
+ })
+ close(ht.writes)
+ return err
+}
+
+// writeCommonHeaders sets common headers on the first write
+// call (Write, WriteHeader, or WriteStatus).
+func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) {
+ if ht.didCommonHeaders {
+ return
+ }
+ ht.didCommonHeaders = true
+
+ h := ht.rw.Header()
+ h["Date"] = nil // suppress Date to make tests happy; TODO: restore
+ h.Set("Content-Type", "application/grpc")
+
+ // Predeclare trailers we'll set later in WriteStatus (after the body).
+ // This is a SHOULD in the HTTP RFC, and the way you add (known)
+ // Trailers per the net/http.ResponseWriter contract.
+ // See https://golang.org/pkg/net/http/#ResponseWriter
+ // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers
+ h.Add("Trailer", "Grpc-Status")
+ h.Add("Trailer", "Grpc-Message")
+
+ if s.sendCompress != "" {
+ h.Set("Grpc-Encoding", s.sendCompress)
+ }
+}
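+
+// trailerContractSketch is a standalone sketch for exposition (hypothetical
+// name, not used by this package) of the net/http trailer contract that
+// writeCommonHeaders relies on: trailer names are declared via the "Trailer"
+// header before the body, and their values are set on the same Header map
+// after the body, at which point net/http emits them as trailers.
+func trailerContractSketch(w http.ResponseWriter) {
+ w.Header().Add("Trailer", "Grpc-Status") // declare before writing the body
+ w.Write([]byte("body bytes"))
+ w.Header().Set("Grpc-Status", "0") // set after the body; sent as a trailer
+}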
+
+func (ht *serverHandlerTransport) Write(s *Stream, data []byte, opts *Options) error {
+ return ht.do(func() {
+ ht.writeCommonHeaders(s)
+ ht.rw.Write(data)
+ if !opts.Delay {
+ ht.rw.(http.Flusher).Flush()
+ }
+ })
+}
+
+func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
+ return ht.do(func() {
+ ht.writeCommonHeaders(s)
+ h := ht.rw.Header()
+ for k, vv := range md {
+ // Clients don't tolerate reading restricted headers after some non-restricted ones have been sent.
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, v := range vv {
+ h.Add(k, v)
+ }
+ }
+ ht.rw.WriteHeader(200)
+ ht.rw.(http.Flusher).Flush()
+ })
+}
+
+func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) {
+ // With this transport type there will be exactly 1 stream: this HTTP request.
+
+ var ctx context.Context
+ var cancel context.CancelFunc
+ if ht.timeoutSet {
+ ctx, cancel = context.WithTimeout(context.Background(), ht.timeout)
+ } else {
+ ctx, cancel = context.WithCancel(context.Background())
+ }
+
+ // requestOver is closed when either the request's context is done
+ // or the status has been written via WriteStatus.
+ requestOver := make(chan struct{})
+
+ // clientGone receives a single value when the peer is gone, either
+ // because the underlying connection is dead or because the
+ // peer sends an http2 RST_STREAM.
+ clientGone := ht.rw.(http.CloseNotifier).CloseNotify()
+ go func() {
+ select {
+ case <-requestOver:
+ return
+ case <-ht.closedCh:
+ case <-clientGone:
+ }
+ cancel()
+ }()
+
+ req := ht.req
+
+ s := &Stream{
+ id: 0, // irrelevant
+ windowHandler: func(int) {}, // nothing
+ cancel: cancel,
+ buf: newRecvBuffer(),
+ st: ht,
+ method: req.URL.Path,
+ recvCompress: req.Header.Get("grpc-encoding"),
+ }
+ pr := &peer.Peer{
+ Addr: ht.RemoteAddr(),
+ }
+ if req.TLS != nil {
+ pr.AuthInfo = credentials.TLSInfo{State: *req.TLS}
+ }
+ ctx = metadata.NewContext(ctx, ht.headerMD)
+ ctx = peer.NewContext(ctx, pr)
+ s.ctx = newContextWithStream(ctx, s)
+ s.dec = &recvBufferReader{ctx: s.ctx, recv: s.buf}
+
+ // readerDone is closed when the Body.Read-ing goroutine exits.
+ readerDone := make(chan struct{})
+ go func() {
+ defer close(readerDone)
+
+ // TODO: minimize garbage, optimize recvBuffer code/ownership
+ const readSize = 8196
+ for buf := make([]byte, readSize); ; {
+ n, err := req.Body.Read(buf)
+ if n > 0 {
+ s.buf.put(&recvMsg{data: buf[:n:n]})
+ buf = buf[n:]
+ }
+ if err != nil {
+ s.buf.put(&recvMsg{err: mapRecvMsgError(err)})
+ return
+ }
+ if len(buf) == 0 {
+ buf = make([]byte, readSize)
+ }
+ }
+ }()
+
+ // startStream is provided by the *grpc.Server's serveStreams.
+ // It starts a goroutine serving s and exits immediately.
+ // The goroutine that is started is the one that then calls
+ // into ht, calling WriteHeader, Write, WriteStatus, Close, etc.
+ startStream(s)
+
+ ht.runStream()
+ close(requestOver)
+
+ // Wait for reading goroutine to finish.
+ req.Body.Close()
+ <-readerDone
+}
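+
+// fullSliceCapSketch illustrates, for exposition only (hypothetical helper,
+// not part of the vendored code), why the reader goroutine above hands out
+// buf[:n:n]: the three-index slice caps capacity at n, so a consumer that
+// appends to the received message reallocates instead of overwriting bytes
+// the reader still owns in the tail of buf.
+func fullSliceCapSketch() {
+ buf := make([]byte, 8)
+ msg := buf[:4:4] // len 4, cap 4
+ msg = append(msg, 0xff) // reallocates; buf[4] is left untouched
+ _ = msg
+}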
+
+func (ht *serverHandlerTransport) runStream() {
+ for {
+ select {
+ case fn, ok := <-ht.writes:
+ if !ok {
+ return
+ }
+ fn()
+ case <-ht.closedCh:
+ return
+ }
+ }
+}
+
+// mapRecvMsgError maps the non-nil err into the appropriate
+// error value as expected by callers of *grpc.parser.recvMsg.
+// In particular, it can only be:
+// * io.EOF
+// * io.ErrUnexpectedEOF
+// * of type transport.ConnectionError
+// * of type transport.StreamError
+func mapRecvMsgError(err error) error {
+ if err == io.EOF || err == io.ErrUnexpectedEOF {
+ return err
+ }
+ if se, ok := err.(http2.StreamError); ok {
+ if code, ok := http2ErrConvTab[se.Code]; ok {
+ return StreamError{
+ Code: code,
+ Desc: se.Error(),
+ }
+ }
+ }
+ return ConnectionError{Desc: err.Error()}
+}
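+
+// mapRecvMsgErrorSketch is an expository sketch (hypothetical name, unused
+// by the package) of the mapping above: clean EOFs pass through unchanged,
+// an http2 stream reset becomes a StreamError carrying the mapped gRPC
+// code, and anything else degrades to a ConnectionError.
+func mapRecvMsgErrorSketch() (error, error) {
+ eof := mapRecvMsgError(io.EOF) // stays io.EOF
+ rst := mapRecvMsgError(http2.StreamError{Code: http2.ErrCodeCancel}) // StreamError with codes.Canceled
+ return eof, rst
+}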
diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go
new file mode 100644
index 0000000..e624f8d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/http2_client.go
@@ -0,0 +1,953 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+ "bytes"
+ "io"
+ "math"
+ "net"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+)
+
+// http2Client implements the ClientTransport interface with HTTP2.
+type http2Client struct {
+ target string // server name/addr
+ userAgent string
+ conn net.Conn // underlying communication channel
+ authInfo credentials.AuthInfo // auth info about the connection
+ nextID uint32 // the next stream ID to be used
+
+ // writableChan synchronizes write access to the transport.
+ // A writer acquires the write lock by receiving a value on writableChan
+ // and releases it by sending on writableChan.
+ writableChan chan int
+ // shutdownChan is closed when Close is called.
+ // Blocking operations should select on shutdownChan to avoid
+ // blocking forever after Close.
+ // TODO(zhaoq): Maybe have a channel context?
+ shutdownChan chan struct{}
+ // errorChan is closed to notify the I/O error to the caller.
+ errorChan chan struct{}
+
+ framer *framer
+ hBuf *bytes.Buffer // the buffer for HPACK encoding
+ hEnc *hpack.Encoder // HPACK encoder
+
+ // controlBuf delivers all the control related tasks (e.g., window
+ // updates, reset streams, and various settings) to the controller.
+ controlBuf *recvBuffer
+ fc *inFlow
+ // sendQuotaPool provides flow control to outbound message.
+ sendQuotaPool *quotaPool
+ // streamsQuota limits the max number of concurrent streams.
+ streamsQuota *quotaPool
+
+ // The scheme used: https if TLS is on, http otherwise.
+ scheme string
+
+ authCreds []credentials.Credentials
+
+ mu sync.Mutex // guard the following variables
+ state transportState // the state of underlying connection
+ activeStreams map[uint32]*Stream
+ // The max number of concurrent streams
+ maxStreams int
+ // the per-stream outbound flow control window size set by the peer.
+ streamSendQuota uint32
+}
+
+// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2
+// and starts to receive messages on it. A non-nil error is returned if
+// construction fails.
+func newHTTP2Client(addr string, opts *ConnectOptions) (_ ClientTransport, err error) {
+ if opts.Dialer == nil {
+ // Set the default Dialer.
+ opts.Dialer = func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("tcp", addr, timeout)
+ }
+ }
+ scheme := "http"
+ startT := time.Now()
+ timeout := opts.Timeout
+ conn, connErr := opts.Dialer(addr, timeout)
+ if connErr != nil {
+ return nil, ConnectionErrorf("transport: %v", connErr)
+ }
+ var authInfo credentials.AuthInfo
+ for _, c := range opts.AuthOptions {
+ if ccreds, ok := c.(credentials.TransportAuthenticator); ok {
+ scheme = "https"
+ // TODO(zhaoq): Now the first TransportAuthenticator is used if there are
+ // multiple ones provided. Revisit this if it is not appropriate. Probably
+ // place the ClientTransport construction into a separate function to make
+ // things clear.
+ if timeout > 0 {
+ timeout -= time.Since(startT)
+ }
+ conn, authInfo, connErr = ccreds.ClientHandshake(addr, conn, timeout)
+ break
+ }
+ }
+ if connErr != nil {
+ return nil, ConnectionErrorf("transport: %v", connErr)
+ }
+ defer func() {
+ if err != nil {
+ conn.Close()
+ }
+ }()
+ ua := primaryUA
+ if opts.UserAgent != "" {
+ ua = opts.UserAgent + " " + ua
+ }
+ var buf bytes.Buffer
+ t := &http2Client{
+ target: addr,
+ userAgent: ua,
+ conn: conn,
+ authInfo: authInfo,
+ // Client-initiated stream IDs are odd, starting from 1.
+ nextID: 1,
+ writableChan: make(chan int, 1),
+ shutdownChan: make(chan struct{}),
+ errorChan: make(chan struct{}),
+ framer: newFramer(conn),
+ hBuf: &buf,
+ hEnc: hpack.NewEncoder(&buf),
+ controlBuf: newRecvBuffer(),
+ fc: &inFlow{limit: initialConnWindowSize},
+ sendQuotaPool: newQuotaPool(defaultWindowSize),
+ scheme: scheme,
+ state: reachable,
+ activeStreams: make(map[uint32]*Stream),
+ authCreds: opts.AuthOptions,
+ maxStreams: math.MaxInt32,
+ streamSendQuota: defaultWindowSize,
+ }
+ // Start the reader goroutine for incoming messages. Each transport has a
+ // dedicated goroutine which reads HTTP2 frames from the network and then
+ // dispatches each frame to the corresponding stream entity.
+ go t.reader()
+ // Send connection preface to server.
+ n, err := t.conn.Write(clientPreface)
+ if err != nil {
+ t.Close()
+ return nil, ConnectionErrorf("transport: %v", err)
+ }
+ if n != len(clientPreface) {
+ t.Close()
+ return nil, ConnectionErrorf("transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface))
+ }
+ if initialWindowSize != defaultWindowSize {
+ err = t.framer.writeSettings(true, http2.Setting{ID: http2.SettingInitialWindowSize, Val: uint32(initialWindowSize)})
+ } else {
+ err = t.framer.writeSettings(true)
+ }
+ if err != nil {
+ t.Close()
+ return nil, ConnectionErrorf("transport: %v", err)
+ }
+ // Adjust the connection flow control window if needed.
+ if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
+ if err := t.framer.writeWindowUpdate(true, 0, delta); err != nil {
+ t.Close()
+ return nil, ConnectionErrorf("transport: %v", err)
+ }
+ }
+ go t.controller()
+ t.writableChan <- 0
+ return t, nil
+}
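+
+// bootstrapOrderSketch is a minimal standalone sketch for exposition
+// (hypothetical helper using the raw x/net/http2 framer rather than this
+// package's wrapper; window sizes are assumed example values) of the
+// bootstrap order newHTTP2Client performs: preface bytes first, then a
+// SETTINGS frame, then an optional connection-level WINDOW_UPDATE.
+func bootstrapOrderSketch(conn net.Conn) error {
+ if _, err := conn.Write([]byte(http2.ClientPreface)); err != nil {
+ return err
+ }
+ fr := http2.NewFramer(conn, conn)
+ if err := fr.WriteSettings(http2.Setting{ID: http2.SettingInitialWindowSize, Val: 1 << 16}); err != nil {
+ return err
+ }
+ return fr.WriteWindowUpdate(0, 1<<20) // stream 0 means connection-level
+}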
+
+func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
+ // TODO(zhaoq): Handle uint32 overflow of Stream.id.
+ s := &Stream{
+ id: t.nextID,
+ method: callHdr.Method,
+ sendCompress: callHdr.SendCompress,
+ buf: newRecvBuffer(),
+ fc: &inFlow{limit: initialWindowSize},
+ sendQuotaPool: newQuotaPool(int(t.streamSendQuota)),
+ headerChan: make(chan struct{}),
+ }
+ t.nextID += 2
+ s.windowHandler = func(n int) {
+ t.updateWindow(s, uint32(n))
+ }
+ // Allow the stream to cancel its own pending operations.
+ s.ctx, s.cancel = context.WithCancel(ctx)
+ s.dec = &recvBufferReader{
+ ctx: s.ctx,
+ recv: s.buf,
+ }
+ return s
+}
+
+// NewStream creates a stream and registers it with the transport as an
+// active stream.
+func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) {
+ // Record the timeout value on the context.
+ var timeout time.Duration
+ if dl, ok := ctx.Deadline(); ok {
+ timeout = dl.Sub(time.Now())
+ }
+ select {
+ case <-ctx.Done():
+ return nil, ContextErr(ctx.Err())
+ default:
+ }
+ pr := &peer.Peer{
+ Addr: t.conn.RemoteAddr(),
+ }
+ // Attach Auth info if there is any.
+ if t.authInfo != nil {
+ pr.AuthInfo = t.authInfo
+ }
+ ctx = peer.NewContext(ctx, pr)
+ authData := make(map[string]string)
+ for _, c := range t.authCreds {
+ // Construct URI required to get auth request metadata.
+ var port string
+ if pos := strings.LastIndex(t.target, ":"); pos != -1 {
+ // Omit port if it is the default one.
+ if t.target[pos+1:] != "443" {
+ port = ":" + t.target[pos+1:]
+ }
+ }
+ pos := strings.LastIndex(callHdr.Method, "/")
+ if pos == -1 {
+ return nil, StreamErrorf(codes.InvalidArgument, "transport: malformed method name: %q", callHdr.Method)
+ }
+ audience := "https://" + callHdr.Host + port + callHdr.Method[:pos]
+ data, err := c.GetRequestMetadata(ctx, audience)
+ if err != nil {
+ return nil, StreamErrorf(codes.InvalidArgument, "transport: %v", err)
+ }
+ for k, v := range data {
+ authData[k] = v
+ }
+ }
+ t.mu.Lock()
+ if t.activeStreams == nil {
+ t.mu.Unlock()
+ return nil, ErrConnClosing
+ }
+ if t.state != reachable {
+ t.mu.Unlock()
+ return nil, ErrConnClosing
+ }
+ checkStreamsQuota := t.streamsQuota != nil
+ t.mu.Unlock()
+ if checkStreamsQuota {
+ sq, err := wait(ctx, t.shutdownChan, t.streamsQuota.acquire())
+ if err != nil {
+ return nil, err
+ }
+ // Return the excess quota balance.
+ if sq > 1 {
+ t.streamsQuota.add(sq - 1)
+ }
+ }
+ if _, err := wait(ctx, t.shutdownChan, t.writableChan); err != nil {
+ // Return the quota back now because there is no stream returned to the caller.
+ if _, ok := err.(StreamError); ok && checkStreamsQuota {
+ t.streamsQuota.add(1)
+ }
+ return nil, err
+ }
+ t.mu.Lock()
+ if t.state != reachable {
+ t.mu.Unlock()
+ return nil, ErrConnClosing
+ }
+ s := t.newStream(ctx, callHdr)
+ t.activeStreams[s.id] = s
+
+ // This stream is not counted when applySettings(...) initializes
+ // t.streamsQuota. Reset t.streamsQuota to the right value.
+ var reset bool
+ if !checkStreamsQuota && t.streamsQuota != nil {
+ reset = true
+ }
+ t.mu.Unlock()
+ if reset {
+ t.streamsQuota.reset(-1)
+ }
+
+ // HPACK encodes various headers. Note that once WriteField(...) is
+ // called, the corresponding headers/continuation frame has to be sent
+ // because hpack.Encoder is stateful.
+ t.hBuf.Reset()
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":scheme", Value: t.scheme})
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":path", Value: callHdr.Method})
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":authority", Value: callHdr.Host})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "user-agent", Value: t.userAgent})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})
+
+ if callHdr.SendCompress != "" {
+ t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
+ }
+ if timeout > 0 {
+ t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-timeout", Value: timeoutEncode(timeout)})
+ }
+ for k, v := range authData {
+ // Capital header names are illegal in HTTP/2.
+ k = strings.ToLower(k)
+ t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: v})
+ }
+ var (
+ hasMD bool
+ endHeaders bool
+ )
+ if md, ok := metadata.FromContext(ctx); ok {
+ hasMD = true
+ for k, v := range md {
+ // HTTP/2 doesn't allow pseudo-headers to be set after regular headers.
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, entry := range v {
+ t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ }
+ }
+ }
+ first := true
+ // Sends the headers in a single batch even when they span multiple frames.
+ for !endHeaders {
+ size := t.hBuf.Len()
+ if size > http2MaxFrameLen {
+ size = http2MaxFrameLen
+ } else {
+ endHeaders = true
+ }
+ var flush bool
+ if endHeaders && (hasMD || callHdr.Flush) {
+ flush = true
+ }
+ if first {
+ // Sends a HeadersFrame to server to start a new stream.
+ p := http2.HeadersFrameParam{
+ StreamID: s.id,
+ BlockFragment: t.hBuf.Next(size),
+ EndStream: false,
+ EndHeaders: endHeaders,
+ }
+ // Force a flush of the buffered frames iff this is the last headers frame
+ // and there is header metadata to be sent. Otherwise, there is no flushing
+ // until the corresponding data frame is written.
+ err = t.framer.writeHeaders(flush, p)
+ first = false
+ } else {
+ // Sends Continuation frames for the leftover headers.
+ err = t.framer.writeContinuation(flush, s.id, endHeaders, t.hBuf.Next(size))
+ }
+ if err != nil {
+ t.notifyError(err)
+ return nil, ConnectionErrorf("transport: %v", err)
+ }
+ }
+ t.writableChan <- 0
+ return s, nil
+}
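+
+// hpackStatefulSketch is a standalone expository sketch (hypothetical name,
+// unused by the package) of the stateful HPACK encoding NewStream depends
+// on: fields written via WriteField are appended to the shared buffer, and
+// the resulting block must be flushed as HEADERS/CONTINUATION frames before
+// the buffer is reused, because the encoder remembers earlier fields.
+func hpackStatefulSketch() []byte {
+ var buf bytes.Buffer
+ enc := hpack.NewEncoder(&buf)
+ enc.WriteField(hpack.HeaderField{Name: ":method", Value: "POST"})
+ enc.WriteField(hpack.HeaderField{Name: "te", Value: "trailers"})
+ return buf.Bytes() // header block fragment for a HEADERS frame
+}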
+
+// CloseStream clears the footprint of a stream when the stream is no longer
+// needed. It must not be executed in the reader's goroutine.
+func (t *http2Client) CloseStream(s *Stream, err error) {
+ var updateStreams bool
+ t.mu.Lock()
+ if t.activeStreams == nil {
+ t.mu.Unlock()
+ return
+ }
+ if t.streamsQuota != nil {
+ updateStreams = true
+ }
+ if t.state == draining && len(t.activeStreams) == 1 {
+ // The transport is draining and s is the last live stream on t.
+ t.mu.Unlock()
+ t.Close()
+ return
+ }
+ delete(t.activeStreams, s.id)
+ t.mu.Unlock()
+ if updateStreams {
+ t.streamsQuota.add(1)
+ }
+ // In case stream sending and receiving are invoked in separate
+ // goroutines (e.g., bi-directional streaming), the caller needs
+ // to call cancel on the stream to interrupt the blocking on
+ // other goroutines.
+ s.cancel()
+ s.mu.Lock()
+ if q := s.fc.resetPendingData(); q > 0 {
+ if n := t.fc.onRead(q); n > 0 {
+ t.controlBuf.put(&windowUpdate{0, n})
+ }
+ }
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ if !s.headerDone {
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ s.state = streamDone
+ s.mu.Unlock()
+ if _, ok := err.(StreamError); ok {
+ t.controlBuf.put(&resetStream{s.id, http2.ErrCodeCancel})
+ }
+}
+
+// Close kicks off the shutdown process of the transport. This should be called
+// only once on a transport. Once it is called, the transport should not be
+// accessed any more.
+func (t *http2Client) Close() (err error) {
+ t.mu.Lock()
+ if t.state == reachable {
+ close(t.errorChan)
+ }
+ if t.state == closing {
+ t.mu.Unlock()
+ return
+ }
+ t.state = closing
+ t.mu.Unlock()
+ close(t.shutdownChan)
+ err = t.conn.Close()
+ t.mu.Lock()
+ streams := t.activeStreams
+ t.activeStreams = nil
+ t.mu.Unlock()
+ // Notify all active streams.
+ for _, s := range streams {
+ s.mu.Lock()
+ if !s.headerDone {
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ s.mu.Unlock()
+ s.write(recvMsg{err: ErrConnClosing})
+ }
+ return
+}
+
+func (t *http2Client) GracefulClose() error {
+ t.mu.Lock()
+ if t.state == closing {
+ t.mu.Unlock()
+ return nil
+ }
+ if t.state == draining {
+ t.mu.Unlock()
+ return nil
+ }
+ t.state = draining
+ active := len(t.activeStreams)
+ t.mu.Unlock()
+ if active == 0 {
+ return t.Close()
+ }
+ return nil
+}
+
+// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
+// should proceed only if Write returns nil.
+// TODO(zhaoq): opts.Delay is ignored in this implementation. Support it later
+// if it improves the performance.
+func (t *http2Client) Write(s *Stream, data []byte, opts *Options) error {
+ r := bytes.NewBuffer(data)
+ for {
+ var p []byte
+ if r.Len() > 0 {
+ size := http2MaxFrameLen
+ s.sendQuotaPool.add(0)
+ // Wait until the stream has some quota to send the data.
+ sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire())
+ if err != nil {
+ return err
+ }
+ t.sendQuotaPool.add(0)
+ // Wait until the transport has some quota to send the data.
+ tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire())
+ if err != nil {
+ if _, ok := err.(StreamError); ok {
+ t.sendQuotaPool.cancel()
+ }
+ return err
+ }
+ if sq < size {
+ size = sq
+ }
+ if tq < size {
+ size = tq
+ }
+ p = r.Next(size)
+ ps := len(p)
+ if ps < sq {
+ // Overbooked stream quota. Return it back.
+ s.sendQuotaPool.add(sq - ps)
+ }
+ if ps < tq {
+ // Overbooked transport quota. Return it back.
+ t.sendQuotaPool.add(tq - ps)
+ }
+ }
+ var (
+ endStream bool
+ forceFlush bool
+ )
+ if opts.Last && r.Len() == 0 {
+ endStream = true
+ }
+ // Indicate there is a writer who is about to write a data frame.
+ t.framer.adjustNumWriters(1)
+ // Got some quota. Try to acquire writing privilege on the transport.
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ if _, ok := err.(StreamError); ok {
+ // Return the connection quota back.
+ t.sendQuotaPool.add(len(p))
+ }
+ if t.framer.adjustNumWriters(-1) == 0 {
+ // This writer is the last one in this batch and has the
+ // responsibility to flush the buffered frames. It queues
+ // a flush request to controlBuf instead of flushing directly
+ // in order to avoid the race with other writing or flushing.
+ t.controlBuf.put(&flushIO{})
+ }
+ return err
+ }
+ select {
+ case <-s.ctx.Done():
+ t.sendQuotaPool.add(len(p))
+ if t.framer.adjustNumWriters(-1) == 0 {
+ t.controlBuf.put(&flushIO{})
+ }
+ t.writableChan <- 0
+ return ContextErr(s.ctx.Err())
+ default:
+ }
+ if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 {
+ // Force a flush iff this is the last frame for the entire gRPC message
+ // and the caller is the only writer at this moment.
+ forceFlush = true
+ }
+ // If WriteData fails, all the pending streams will be handled
+ // by http2Client.Close(). No explicit CloseStream() needs to be
+ // invoked.
+ if err := t.framer.writeData(forceFlush, s.id, endStream, p); err != nil {
+ t.notifyError(err)
+ return ConnectionErrorf("transport: %v", err)
+ }
+ if t.framer.adjustNumWriters(-1) == 0 {
+ t.framer.flushWrite()
+ }
+ t.writableChan <- 0
+ if r.Len() == 0 {
+ break
+ }
+ }
+ if !opts.Last {
+ return nil
+ }
+ s.mu.Lock()
+ if s.state != streamDone {
+ if s.state == streamReadDone {
+ s.state = streamDone
+ } else {
+ s.state = streamWriteDone
+ }
+ }
+ s.mu.Unlock()
+ return nil
+}
+
+func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ s, ok := t.activeStreams[f.Header().StreamID]
+ return s, ok
+}
+
+// updateWindow adjusts the inbound quota for the stream and the transport.
+// Window updates are delivered to the controller for sending when the
+// cumulative quota exceeds the corresponding threshold.
+func (t *http2Client) updateWindow(s *Stream, n uint32) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.state == streamDone {
+ return
+ }
+ if w := t.fc.onRead(n); w > 0 {
+ t.controlBuf.put(&windowUpdate{0, w})
+ }
+ if w := s.fc.onRead(n); w > 0 {
+ t.controlBuf.put(&windowUpdate{s.id, w})
+ }
+}
+
+func (t *http2Client) handleData(f *http2.DataFrame) {
+ size := len(f.Data())
+ if err := t.fc.onData(uint32(size)); err != nil {
+ t.notifyError(ConnectionErrorf("%v", err))
+ return
+ }
+ // Select the right stream to dispatch.
+ s, ok := t.getStream(f)
+ if !ok {
+ if w := t.fc.onRead(uint32(size)); w > 0 {
+ t.controlBuf.put(&windowUpdate{0, w})
+ }
+ return
+ }
+ if size > 0 {
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ // The stream has been closed. Release the corresponding quota.
+ if w := t.fc.onRead(uint32(size)); w > 0 {
+ t.controlBuf.put(&windowUpdate{0, w})
+ }
+ return
+ }
+ if err := s.fc.onData(uint32(size)); err != nil {
+ s.state = streamDone
+ s.statusCode = codes.Internal
+ s.statusDesc = err.Error()
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
+ t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
+ return
+ }
+ s.mu.Unlock()
+ // TODO(bradfitz, zhaoq): A copy is required here because there is no
+ // guarantee f.Data() is consumed before the arrival of next frame.
+ // Can this copy be eliminated?
+ data := make([]byte, size)
+ copy(data, f.Data())
+ s.write(recvMsg{data: data})
+ }
+ // The server has closed the stream without sending trailers. Record that
+ // the read direction is closed, and set the status appropriately.
+ if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) {
+ s.mu.Lock()
+ if s.state == streamWriteDone {
+ s.state = streamDone
+ } else {
+ s.state = streamReadDone
+ }
+ s.statusCode = codes.Internal
+ s.statusDesc = "server closed the stream without sending trailers"
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
+ }
+}
+
+func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
+ s, ok := t.getStream(f)
+ if !ok {
+ return
+ }
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ s.state = streamDone
+ if !s.headerDone {
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ s.statusCode, ok = http2ErrConvTab[http2.ErrCode(f.ErrCode)]
+ if !ok {
+ grpclog.Println("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error ", f.ErrCode)
+ s.statusCode = codes.Unknown
+ }
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
+}
+
+func (t *http2Client) handleSettings(f *http2.SettingsFrame) {
+ if f.IsAck() {
+ return
+ }
+ var ss []http2.Setting
+ f.ForeachSetting(func(s http2.Setting) error {
+ ss = append(ss, s)
+ return nil
+ })
+ // The settings will be applied once the ack is sent.
+ t.controlBuf.put(&settings{ack: true, ss: ss})
+}
+
+func (t *http2Client) handlePing(f *http2.PingFrame) {
+ pingAck := &ping{ack: true}
+ copy(pingAck.data[:], f.Data[:])
+ t.controlBuf.put(pingAck)
+}
+
+func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
+ // TODO(zhaoq): GoAwayFrame handler to be implemented
+}
+
+func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+ id := f.Header().StreamID
+ incr := f.Increment
+ if id == 0 {
+ t.sendQuotaPool.add(int(incr))
+ return
+ }
+ if s, ok := t.getStream(f); ok {
+ s.sendQuotaPool.add(int(incr))
+ }
+}
+
+// operateHeaders takes action on the decoded headers.
+func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
+ s, ok := t.getStream(frame)
+ if !ok {
+ return
+ }
+ var state decodeState
+ for _, hf := range frame.Fields {
+ state.processHeaderField(hf)
+ }
+ if state.err != nil {
+ s.write(recvMsg{err: state.err})
+ // Something went wrong; stop reading even if data remains.
+ return
+ }
+
+ endStream := frame.StreamEnded()
+
+ s.mu.Lock()
+ if !endStream {
+ s.recvCompress = state.encoding
+ }
+ if !s.headerDone {
+ if !endStream && len(state.mdata) > 0 {
+ s.header = state.mdata
+ }
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ if !endStream || s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+
+ if len(state.mdata) > 0 {
+ s.trailer = state.mdata
+ }
+ s.state = streamDone
+ s.statusCode = state.statusCode
+ s.statusDesc = state.statusDesc
+ s.mu.Unlock()
+
+ s.write(recvMsg{err: io.EOF})
+}
+
+func handleMalformedHTTP2(s *Stream, err error) {
+ s.mu.Lock()
+ if !s.headerDone {
+ close(s.headerChan)
+ s.headerDone = true
+ }
+ s.mu.Unlock()
+ s.write(recvMsg{err: err})
+}
+
+// reader runs as a separate goroutine in charge of reading data from network
+// connection.
+//
+// TODO(zhaoq): currently one reader per transport. Investigate whether this is
+// optimal.
+// TODO(zhaoq): Check the validity of the incoming frame sequence.
+func (t *http2Client) reader() {
+ // Check the validity of server preface.
+ frame, err := t.framer.readFrame()
+ if err != nil {
+ t.notifyError(err)
+ return
+ }
+ sf, ok := frame.(*http2.SettingsFrame)
+ if !ok {
+ // err is nil here; the problem is the unexpected frame type.
+ t.notifyError(ConnectionErrorf("transport: first frame received is not a settings frame: %T", frame))
+ return
+ }
+ t.handleSettings(sf)
+
+ // loop to keep reading incoming messages on this transport.
+ for {
+ frame, err := t.framer.readFrame()
+ if err != nil {
+ // Abort an active stream if the http2.Framer returns a
+ // http2.StreamError. This can happen only if the server's response
+ // is malformed http2.
+ if se, ok := err.(http2.StreamError); ok {
+ t.mu.Lock()
+ s := t.activeStreams[se.StreamID]
+ t.mu.Unlock()
+ if s != nil {
+ // Use the error detail to provide a better error message.
+ handleMalformedHTTP2(s, StreamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.errorDetail()))
+ }
+ continue
+ } else {
+ // Transport error.
+ t.notifyError(err)
+ return
+ }
+ }
+ switch frame := frame.(type) {
+ case *http2.MetaHeadersFrame:
+ t.operateHeaders(frame)
+ case *http2.DataFrame:
+ t.handleData(frame)
+ case *http2.RSTStreamFrame:
+ t.handleRSTStream(frame)
+ case *http2.SettingsFrame:
+ t.handleSettings(frame)
+ case *http2.PingFrame:
+ t.handlePing(frame)
+ case *http2.GoAwayFrame:
+ t.handleGoAway(frame)
+ case *http2.WindowUpdateFrame:
+ t.handleWindowUpdate(frame)
+ default:
+ grpclog.Printf("transport: http2Client.reader got unhandled frame type %v.", frame)
+ }
+ }
+}
+
+func (t *http2Client) applySettings(ss []http2.Setting) {
+ for _, s := range ss {
+ switch s.ID {
+ case http2.SettingMaxConcurrentStreams:
+ // TODO(zhaoq): This is a hack to avoid significant refactoring of the
+ // code to deal with the unrealistic int32 overflow. Probably will try
+ // to find a better way to handle this later.
+ if s.Val > math.MaxInt32 {
+ s.Val = math.MaxInt32
+ }
+ t.mu.Lock()
+ reset := t.streamsQuota != nil
+ if !reset {
+ t.streamsQuota = newQuotaPool(int(s.Val) - len(t.activeStreams))
+ }
+ ms := t.maxStreams
+ t.maxStreams = int(s.Val)
+ t.mu.Unlock()
+ if reset {
+ t.streamsQuota.reset(int(s.Val) - ms)
+ }
+ case http2.SettingInitialWindowSize:
+ t.mu.Lock()
+ for _, stream := range t.activeStreams {
+ // Adjust the sending quota for each stream.
+ stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
+ }
+ t.streamSendQuota = s.Val
+ t.mu.Unlock()
+ }
+ }
+}
+
+// controller runs in a separate goroutine and takes charge of sending
+// control frames (e.g., window updates, reset streams, settings) to the server.
+func (t *http2Client) controller() {
+ for {
+ select {
+ case i := <-t.controlBuf.get():
+ t.controlBuf.load()
+ select {
+ case <-t.writableChan:
+ switch i := i.(type) {
+ case *windowUpdate:
+ t.framer.writeWindowUpdate(true, i.streamID, i.increment)
+ case *settings:
+ if i.ack {
+ t.framer.writeSettingsAck(true)
+ t.applySettings(i.ss)
+ } else {
+ t.framer.writeSettings(true, i.ss...)
+ }
+ case *resetStream:
+ t.framer.writeRSTStream(true, i.streamID, i.code)
+ case *flushIO:
+ t.framer.flushWrite()
+ case *ping:
+ t.framer.writePing(true, i.ack, i.data)
+ default:
+ grpclog.Printf("transport: http2Client.controller got unexpected item type %v\n", i)
+ }
+ t.writableChan <- 0
+ continue
+ case <-t.shutdownChan:
+ return
+ }
+ case <-t.shutdownChan:
+ return
+ }
+ }
+}
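+
+// controlPathSketch illustrates, for exposition only (hypothetical helper;
+// the increment is an assumed example value), how a control frame travels:
+// it is queued on controlBuf here and written by the controller goroutine
+// above once that goroutine holds the write lock via writableChan.
+func controlPathSketch(t *http2Client) {
+ t.controlBuf.put(&windowUpdate{streamID: 0, increment: 1024})
+}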
+
+func (t *http2Client) Error() <-chan struct{} {
+ return t.errorChan
+}
+
+func (t *http2Client) notifyError(err error) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ // make sure t.errorChan is closed only once.
+ if t.state == reachable {
+ t.state = unreachable
+ close(t.errorChan)
+ grpclog.Printf("transport: http2Client.notifyError got notified that the client transport was broken %v.", err)
+ }
+}
diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go
new file mode 100644
index 0000000..1c4d585
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/http2_server.go
@@ -0,0 +1,743 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "math"
+ "net"
+ "strconv"
+ "sync"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+ "google.golang.org/grpc/peer"
+)
+
+// ErrIllegalHeaderWrite indicates that setting a header is illegal because
+// of the stream's state.
+var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called")
+
+// http2Server implements the ServerTransport interface with HTTP2.
+type http2Server struct {
+ conn net.Conn
+ maxStreamID uint32 // max stream ID ever seen
+ authInfo credentials.AuthInfo // auth info about the connection
+ // writableChan synchronizes write access to the transport.
+ // A writer acquires the write lock by receiving a value on writableChan
+ // and releases it by sending on writableChan.
+ writableChan chan int
+ // shutdownChan is closed when Close is called.
+ // Blocking operations should select on shutdownChan to avoid
+ // blocking forever after Close.
+ shutdownChan chan struct{}
+ framer *framer
+ hBuf *bytes.Buffer // the buffer for HPACK encoding
+ hEnc *hpack.Encoder // HPACK encoder
+
+ // The max number of concurrent streams.
+ maxStreams uint32
+ // controlBuf delivers all the control related tasks (e.g., window
+ // updates, reset streams, and various settings) to the controller.
+ controlBuf *recvBuffer
+ fc *inFlow
+ // sendQuotaPool provides flow control to outbound message.
+ sendQuotaPool *quotaPool
+
+ mu sync.Mutex // guard the following
+ state transportState
+ activeStreams map[uint32]*Stream
+ // the per-stream outbound flow control window size set by the peer.
+ streamSendQuota uint32
+}
+
+// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is
+// returned if something goes wrong.
+func newHTTP2Server(conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (_ ServerTransport, err error) {
+ framer := newFramer(conn)
+ // Send initial settings as connection preface to client.
+ var settings []http2.Setting
+ // TODO(zhaoq): Have a better way to signal "no limit" because 0 is
+ // permitted in the HTTP2 spec.
+ if maxStreams == 0 {
+ maxStreams = math.MaxUint32
+ } else {
+ settings = append(settings, http2.Setting{ID: http2.SettingMaxConcurrentStreams, Val: maxStreams})
+ }
+ if initialWindowSize != defaultWindowSize {
+ settings = append(settings, http2.Setting{ID: http2.SettingInitialWindowSize, Val: uint32(initialWindowSize)})
+ }
+ if err := framer.writeSettings(true, settings...); err != nil {
+ return nil, ConnectionErrorf("transport: %v", err)
+ }
+ // Adjust the connection flow control window if needed.
+ if delta := uint32(initialConnWindowSize - defaultWindowSize); delta > 0 {
+ if err := framer.writeWindowUpdate(true, 0, delta); err != nil {
+ return nil, ConnectionErrorf("transport: %v", err)
+ }
+ }
+ var buf bytes.Buffer
+ t := &http2Server{
+ conn: conn,
+ authInfo: authInfo,
+ framer: framer,
+ hBuf: &buf,
+ hEnc: hpack.NewEncoder(&buf),
+ maxStreams: maxStreams,
+ controlBuf: newRecvBuffer(),
+ fc: &inFlow{limit: initialConnWindowSize},
+ sendQuotaPool: newQuotaPool(defaultWindowSize),
+ state: reachable,
+ writableChan: make(chan int, 1),
+ shutdownChan: make(chan struct{}),
+ activeStreams: make(map[uint32]*Stream),
+ streamSendQuota: defaultWindowSize,
+ }
+ go t.controller()
+ t.writableChan <- 0
+ return t, nil
+}
+
+// operateHeaders takes action on the decoded headers.
+func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) {
+ buf := newRecvBuffer()
+ s := &Stream{
+ id: frame.Header().StreamID,
+ st: t,
+ buf: buf,
+ fc: &inFlow{limit: initialWindowSize},
+ }
+
+ var state decodeState
+ for _, hf := range frame.Fields {
+ state.processHeaderField(hf)
+ }
+ if err := state.err; err != nil {
+ if se, ok := err.(StreamError); ok {
+ t.controlBuf.put(&resetStream{s.id, statusCodeConvTab[se.Code]})
+ }
+ return
+ }
+
+ if frame.StreamEnded() {
+ // s is just created by the caller. No lock needed.
+ s.state = streamReadDone
+ }
+ s.recvCompress = state.encoding
+ if state.timeoutSet {
+ s.ctx, s.cancel = context.WithTimeout(context.TODO(), state.timeout)
+ } else {
+ s.ctx, s.cancel = context.WithCancel(context.TODO())
+ }
+ pr := &peer.Peer{
+ Addr: t.conn.RemoteAddr(),
+ }
+ // Attach Auth info if there is any.
+ if t.authInfo != nil {
+ pr.AuthInfo = t.authInfo
+ }
+ s.ctx = peer.NewContext(s.ctx, pr)
+ // Cache the current stream in the context so that the server application
+ // can retrieve it. Required when the server wants to send some metadata
+ // back to the client (unary call only).
+ s.ctx = newContextWithStream(s.ctx, s)
+ // Attach the received metadata to the context.
+ if len(state.mdata) > 0 {
+ s.ctx = metadata.NewContext(s.ctx, state.mdata)
+ }
+
+ s.dec = &recvBufferReader{
+ ctx: s.ctx,
+ recv: s.buf,
+ }
+ s.method = state.method
+ t.mu.Lock()
+ if t.state != reachable {
+ t.mu.Unlock()
+ return
+ }
+ if uint32(len(t.activeStreams)) >= t.maxStreams {
+ t.mu.Unlock()
+ t.controlBuf.put(&resetStream{s.id, http2.ErrCodeRefusedStream})
+ return
+ }
+ s.sendQuotaPool = newQuotaPool(int(t.streamSendQuota))
+ t.activeStreams[s.id] = s
+ t.mu.Unlock()
+ s.windowHandler = func(n int) {
+ t.updateWindow(s, uint32(n))
+ }
+ handle(s)
+}
+
+// HandleStreams receives incoming streams using the given handler. This is
+// typically run in a separate goroutine.
+func (t *http2Server) HandleStreams(handle func(*Stream)) {
+ // Check the validity of client preface.
+ preface := make([]byte, len(clientPreface))
+ if _, err := io.ReadFull(t.conn, preface); err != nil {
+ grpclog.Printf("transport: http2Server.HandleStreams failed to receive the preface from client: %v", err)
+ t.Close()
+ return
+ }
+ if !bytes.Equal(preface, clientPreface) {
+ grpclog.Printf("transport: http2Server.HandleStreams received bogus greeting from client: %q", preface)
+ t.Close()
+ return
+ }
+
+ frame, err := t.framer.readFrame()
+ if err != nil {
+ grpclog.Printf("transport: http2Server.HandleStreams failed to read frame: %v", err)
+ t.Close()
+ return
+ }
+ sf, ok := frame.(*http2.SettingsFrame)
+ if !ok {
+ grpclog.Printf("transport: http2Server.HandleStreams saw invalid preface type %T from client", frame)
+ t.Close()
+ return
+ }
+ t.handleSettings(sf)
+
+ for {
+ frame, err := t.framer.readFrame()
+ if err != nil {
+ if se, ok := err.(http2.StreamError); ok {
+ t.mu.Lock()
+ s := t.activeStreams[se.StreamID]
+ t.mu.Unlock()
+ if s != nil {
+ t.closeStream(s)
+ }
+ t.controlBuf.put(&resetStream{se.StreamID, se.Code})
+ continue
+ }
+ t.Close()
+ return
+ }
+ switch frame := frame.(type) {
+ case *http2.MetaHeadersFrame:
+ id := frame.Header().StreamID
+ if id%2 != 1 || id <= t.maxStreamID {
+ // illegal gRPC stream id.
+ grpclog.Println("transport: http2Server.HandleStreams received an illegal stream id: ", id)
+ t.Close()
+ break
+ }
+ t.maxStreamID = id
+ t.operateHeaders(frame, handle)
+ case *http2.DataFrame:
+ t.handleData(frame)
+ case *http2.RSTStreamFrame:
+ t.handleRSTStream(frame)
+ case *http2.SettingsFrame:
+ t.handleSettings(frame)
+ case *http2.PingFrame:
+ t.handlePing(frame)
+ case *http2.WindowUpdateFrame:
+ t.handleWindowUpdate(frame)
+ case *http2.GoAwayFrame:
+ break
+ default:
+ grpclog.Printf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame)
+ }
+ }
+}
+
+func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.activeStreams == nil {
+ // The transport is closing.
+ return nil, false
+ }
+ s, ok := t.activeStreams[f.Header().StreamID]
+ if !ok {
+ // The stream is already done.
+ return nil, false
+ }
+ return s, true
+}
+
+// updateWindow adjusts the inbound quota for the stream and the transport.
+// Window updates are delivered to the controller for sending when the
+// cumulative quota exceeds the corresponding threshold.
+func (t *http2Server) updateWindow(s *Stream, n uint32) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.state == streamDone {
+ return
+ }
+ if w := t.fc.onRead(n); w > 0 {
+ t.controlBuf.put(&windowUpdate{0, w})
+ }
+ if w := s.fc.onRead(n); w > 0 {
+ t.controlBuf.put(&windowUpdate{s.id, w})
+ }
+}
+
+func (t *http2Server) handleData(f *http2.DataFrame) {
+ size := len(f.Data())
+ if err := t.fc.onData(uint32(size)); err != nil {
+ grpclog.Printf("transport: http2Server %v", err)
+ t.Close()
+ return
+ }
+ // Select the right stream to dispatch.
+ s, ok := t.getStream(f)
+ if !ok {
+ if w := t.fc.onRead(uint32(size)); w > 0 {
+ t.controlBuf.put(&windowUpdate{0, w})
+ }
+ return
+ }
+ if size > 0 {
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ // The stream has been closed. Release the corresponding quota.
+ if w := t.fc.onRead(uint32(size)); w > 0 {
+ t.controlBuf.put(&windowUpdate{0, w})
+ }
+ return
+ }
+ if err := s.fc.onData(uint32(size)); err != nil {
+ s.mu.Unlock()
+ t.closeStream(s)
+ t.controlBuf.put(&resetStream{s.id, http2.ErrCodeFlowControl})
+ return
+ }
+ s.mu.Unlock()
+ // TODO(bradfitz, zhaoq): A copy is required here because there is no
+ // guarantee f.Data() is consumed before the arrival of next frame.
+ // Can this copy be eliminated?
+ data := make([]byte, size)
+ copy(data, f.Data())
+ s.write(recvMsg{data: data})
+ }
+ if f.Header().Flags.Has(http2.FlagDataEndStream) {
+ // Received the end of stream from the client.
+ s.mu.Lock()
+ if s.state != streamDone {
+ if s.state == streamWriteDone {
+ s.state = streamDone
+ } else {
+ s.state = streamReadDone
+ }
+ }
+ s.mu.Unlock()
+ s.write(recvMsg{err: io.EOF})
+ }
+}
+
+func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) {
+ s, ok := t.getStream(f)
+ if !ok {
+ return
+ }
+ t.closeStream(s)
+}
+
+func (t *http2Server) handleSettings(f *http2.SettingsFrame) {
+ if f.IsAck() {
+ return
+ }
+ var ss []http2.Setting
+ f.ForeachSetting(func(s http2.Setting) error {
+ ss = append(ss, s)
+ return nil
+ })
+ // The settings will be applied once the ack is sent.
+ t.controlBuf.put(&settings{ack: true, ss: ss})
+}
+
+func (t *http2Server) handlePing(f *http2.PingFrame) {
+ pingAck := &ping{ack: true}
+ copy(pingAck.data[:], f.Data[:])
+ t.controlBuf.put(pingAck)
+}
+
+func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) {
+ id := f.Header().StreamID
+ incr := f.Increment
+ if id == 0 {
+ t.sendQuotaPool.add(int(incr))
+ return
+ }
+ if s, ok := t.getStream(f); ok {
+ s.sendQuotaPool.add(int(incr))
+ }
+}
+
+func (t *http2Server) writeHeaders(s *Stream, b *bytes.Buffer, endStream bool) error {
+ first := true
+ endHeaders := false
+ var err error
+ // Sends the headers in a single batch.
+ for !endHeaders {
+ size := t.hBuf.Len()
+ if size > http2MaxFrameLen {
+ size = http2MaxFrameLen
+ } else {
+ endHeaders = true
+ }
+ if first {
+ p := http2.HeadersFrameParam{
+ StreamID: s.id,
+ BlockFragment: b.Next(size),
+ EndStream: endStream,
+ EndHeaders: endHeaders,
+ }
+ err = t.framer.writeHeaders(endHeaders, p)
+ first = false
+ } else {
+ err = t.framer.writeContinuation(endHeaders, s.id, endHeaders, b.Next(size))
+ }
+ if err != nil {
+ t.Close()
+ return ConnectionErrorf("transport: %v", err)
+ }
+ }
+ return nil
+}
+
+// WriteHeader sends the header metadata md back to the client.
+func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error {
+ s.mu.Lock()
+ if s.headerOk || s.state == streamDone {
+ s.mu.Unlock()
+ return ErrIllegalHeaderWrite
+ }
+ s.headerOk = true
+ s.mu.Unlock()
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ return err
+ }
+ t.hBuf.Reset()
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ if s.sendCompress != "" {
+ t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+ }
+ for k, v := range md {
+ if isReservedHeader(k) {
+ // Clients don't tolerate reading restricted headers after some non-restricted ones have been sent.
+ continue
+ }
+ for _, entry := range v {
+ t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ }
+ }
+ if err := t.writeHeaders(s, t.hBuf, false); err != nil {
+ return err
+ }
+ t.writableChan <- 0
+ return nil
+}
+
+// WriteStatus sends stream status to the client and terminates the stream.
+// No further I/O operations can be performed on this stream.
+// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early
+// OK is adopted.
+func (t *http2Server) WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error {
+ var headersSent bool
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return nil
+ }
+ if s.headerOk {
+ headersSent = true
+ }
+ s.mu.Unlock()
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ return err
+ }
+ t.hBuf.Reset()
+ if !headersSent {
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ }
+ t.hEnc.WriteField(
+ hpack.HeaderField{
+ Name: "grpc-status",
+ Value: strconv.Itoa(int(statusCode)),
+ })
+ t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-message", Value: statusDesc})
+ // Attach the trailer metadata.
+ for k, v := range s.trailer {
+ // Clients don't tolerate reading restricted headers after some non-restricted ones have been sent.
+ if isReservedHeader(k) {
+ continue
+ }
+ for _, entry := range v {
+ t.hEnc.WriteField(hpack.HeaderField{Name: k, Value: entry})
+ }
+ }
+ if err := t.writeHeaders(s, t.hBuf, true); err != nil {
+ t.Close()
+ return err
+ }
+ t.closeStream(s)
+ t.writableChan <- 0
+ return nil
+}
+
+// Write converts the data into HTTP2 data frames and sends them out. A
+// non-nil error is returned if it fails (e.g., framing error, transport error).
+func (t *http2Server) Write(s *Stream, data []byte, opts *Options) error {
+ // TODO(zhaoq): Support multi-writers for a single stream.
+ var writeHeaderFrame bool
+ s.mu.Lock()
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return StreamErrorf(codes.Unknown, "the stream is done")
+ }
+ if !s.headerOk {
+ writeHeaderFrame = true
+ s.headerOk = true
+ }
+ s.mu.Unlock()
+ if writeHeaderFrame {
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ return err
+ }
+ t.hBuf.Reset()
+ t.hEnc.WriteField(hpack.HeaderField{Name: ":status", Value: "200"})
+ t.hEnc.WriteField(hpack.HeaderField{Name: "content-type", Value: "application/grpc"})
+ if s.sendCompress != "" {
+ t.hEnc.WriteField(hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress})
+ }
+ p := http2.HeadersFrameParam{
+ StreamID: s.id,
+ BlockFragment: t.hBuf.Bytes(),
+ EndHeaders: true,
+ }
+ if err := t.framer.writeHeaders(false, p); err != nil {
+ t.Close()
+ return ConnectionErrorf("transport: %v", err)
+ }
+ t.writableChan <- 0
+ }
+ r := bytes.NewBuffer(data)
+ for {
+ if r.Len() == 0 {
+ return nil
+ }
+ size := http2MaxFrameLen
+ s.sendQuotaPool.add(0)
+ // Wait until the stream has some quota to send the data.
+ sq, err := wait(s.ctx, t.shutdownChan, s.sendQuotaPool.acquire())
+ if err != nil {
+ return err
+ }
+ t.sendQuotaPool.add(0)
+ // Wait until the transport has some quota to send the data.
+ tq, err := wait(s.ctx, t.shutdownChan, t.sendQuotaPool.acquire())
+ if err != nil {
+ if _, ok := err.(StreamError); ok {
+ t.sendQuotaPool.cancel()
+ }
+ return err
+ }
+ if sq < size {
+ size = sq
+ }
+ if tq < size {
+ size = tq
+ }
+ p := r.Next(size)
+ ps := len(p)
+ if ps < sq {
+ // Overbooked stream quota. Return it back.
+ s.sendQuotaPool.add(sq - ps)
+ }
+ if ps < tq {
+ // Overbooked transport quota. Return it back.
+ t.sendQuotaPool.add(tq - ps)
+ }
+ t.framer.adjustNumWriters(1)
+ // Got some quota. Try to acquire writing privilege on the
+ // transport.
+ if _, err := wait(s.ctx, t.shutdownChan, t.writableChan); err != nil {
+ if _, ok := err.(StreamError); ok {
+ // Return the connection quota back.
+ t.sendQuotaPool.add(ps)
+ }
+ if t.framer.adjustNumWriters(-1) == 0 {
+ // This writer is the last one in this batch and has the
+ // responsibility to flush the buffered frames. It queues
+ // a flush request to controlBuf instead of flushing directly
+ // in order to avoid the race with other writing or flushing.
+ t.controlBuf.put(&flushIO{})
+ }
+ return err
+ }
+ select {
+ case <-s.ctx.Done():
+ t.sendQuotaPool.add(ps)
+ if t.framer.adjustNumWriters(-1) == 0 {
+ t.controlBuf.put(&flushIO{})
+ }
+ t.writableChan <- 0
+ return ContextErr(s.ctx.Err())
+ default:
+ }
+ var forceFlush bool
+ if r.Len() == 0 && t.framer.adjustNumWriters(0) == 1 && !opts.Last {
+ forceFlush = true
+ }
+ if err := t.framer.writeData(forceFlush, s.id, false, p); err != nil {
+ t.Close()
+ return ConnectionErrorf("transport: %v", err)
+ }
+ if t.framer.adjustNumWriters(-1) == 0 {
+ t.framer.flushWrite()
+ }
+ t.writableChan <- 0
+ }
+}
+
+func (t *http2Server) applySettings(ss []http2.Setting) {
+ for _, s := range ss {
+ if s.ID == http2.SettingInitialWindowSize {
+ // Unlock explicitly rather than defer: a deferred unlock would
+ // deadlock if the settings frame carried this setting twice.
+ t.mu.Lock()
+ for _, stream := range t.activeStreams {
+ stream.sendQuotaPool.reset(int(s.Val - t.streamSendQuota))
+ }
+ t.streamSendQuota = s.Val
+ t.mu.Unlock()
+ }
+ }
+}
+
+// controller runs in a separate goroutine and takes charge of sending
+// control frames (e.g., window updates, reset streams, settings) to the client.
+func (t *http2Server) controller() {
+ for {
+ select {
+ case i := <-t.controlBuf.get():
+ t.controlBuf.load()
+ select {
+ case <-t.writableChan:
+ switch i := i.(type) {
+ case *windowUpdate:
+ t.framer.writeWindowUpdate(true, i.streamID, i.increment)
+ case *settings:
+ if i.ack {
+ t.framer.writeSettingsAck(true)
+ t.applySettings(i.ss)
+ } else {
+ t.framer.writeSettings(true, i.ss...)
+ }
+ case *resetStream:
+ t.framer.writeRSTStream(true, i.streamID, i.code)
+ case *flushIO:
+ t.framer.flushWrite()
+ case *ping:
+ t.framer.writePing(true, i.ack, i.data)
+ default:
+ grpclog.Printf("transport: http2Server.controller got unexpected item type %v\n", i)
+ }
+ t.writableChan <- 0
+ continue
+ case <-t.shutdownChan:
+ return
+ }
+ case <-t.shutdownChan:
+ return
+ }
+ }
+}
+
+// Close starts shutting down the http2Server transport.
+// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This
+// could cause some resource issue. Revisit this later.
+func (t *http2Server) Close() (err error) {
+ t.mu.Lock()
+ if t.state == closing {
+ t.mu.Unlock()
+ return errors.New("transport: Close() was already called")
+ }
+ t.state = closing
+ streams := t.activeStreams
+ t.activeStreams = nil
+ t.mu.Unlock()
+ close(t.shutdownChan)
+ err = t.conn.Close()
+ // Cancel all active streams.
+ for _, s := range streams {
+ s.cancel()
+ }
+ return
+}
+
+// closeStream clears the footprint of a stream when the stream is not needed
+// any more.
+func (t *http2Server) closeStream(s *Stream) {
+ t.mu.Lock()
+ delete(t.activeStreams, s.id)
+ t.mu.Unlock()
+ // In case stream sending and receiving are invoked in separate
+ // goroutines (e.g., bi-directional streaming), cancel needs to be
+ // called to interrupt the potential blocking on other goroutines.
+ s.cancel()
+ s.mu.Lock()
+ if q := s.fc.resetPendingData(); q > 0 {
+ if w := t.fc.onRead(q); w > 0 {
+ t.controlBuf.put(&windowUpdate{0, w})
+ }
+ }
+ if s.state == streamDone {
+ s.mu.Unlock()
+ return
+ }
+ s.state = streamDone
+ s.mu.Unlock()
+}
+
+func (t *http2Server) RemoteAddr() net.Addr {
+ return t.conn.RemoteAddr()
+}
diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go
new file mode 100644
index 0000000..a4b1b07
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/http_util.go
@@ -0,0 +1,422 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+package transport
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net"
+ "strconv"
+ "strings"
+ "sync/atomic"
+ "time"
+
+ "golang.org/x/net/http2"
+ "golang.org/x/net/http2/hpack"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/metadata"
+)
+
+const (
+ // The primary user agent
+ primaryUA = "grpc-go/0.11"
+ // http2MaxFrameLen specifies the max length of a HTTP2 frame.
+ http2MaxFrameLen = 16384 // 16KB frame
+ // http://http2.github.io/http2-spec/#SettingValues
+ http2InitHeaderTableSize = 4096
+ // http2IOBufSize specifies the buffer size for sending frames.
+ http2IOBufSize = 32 * 1024
+)
+
+var (
+ clientPreface = []byte(http2.ClientPreface)
+ http2ErrConvTab = map[http2.ErrCode]codes.Code{
+ http2.ErrCodeNo: codes.Internal,
+ http2.ErrCodeProtocol: codes.Internal,
+ http2.ErrCodeInternal: codes.Internal,
+ http2.ErrCodeFlowControl: codes.ResourceExhausted,
+ http2.ErrCodeSettingsTimeout: codes.Internal,
+ http2.ErrCodeStreamClosed: codes.Internal,
+ http2.ErrCodeFrameSize: codes.Internal,
+ http2.ErrCodeRefusedStream: codes.Unavailable,
+ http2.ErrCodeCancel: codes.Canceled,
+ http2.ErrCodeCompression: codes.Internal,
+ http2.ErrCodeConnect: codes.Internal,
+ http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted,
+ http2.ErrCodeInadequateSecurity: codes.PermissionDenied,
+ http2.ErrCodeHTTP11Required: codes.FailedPrecondition,
+ }
+ statusCodeConvTab = map[codes.Code]http2.ErrCode{
+ codes.Internal: http2.ErrCodeInternal,
+ codes.Canceled: http2.ErrCodeCancel,
+ codes.Unavailable: http2.ErrCodeRefusedStream,
+ codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm,
+ codes.PermissionDenied: http2.ErrCodeInadequateSecurity,
+ }
+)
+
+// decodeState records the state during HPACK decoding. It must be reset once
+// decoding of the entire header block is finished.
+type decodeState struct {
+ err error // first error encountered decoding
+
+ encoding string
+ // statusCode caches the stream status received from the trailer
+ // the server sent. Client side only.
+ statusCode codes.Code
+ statusDesc string
+ // Server side only fields.
+ timeoutSet bool
+ timeout time.Duration
+ method string
+ // key-value metadata map from the peer.
+ mdata map[string][]string
+}
+
+// isReservedHeader checks whether hdr belongs to the HTTP2 headers reserved
+// by the gRPC protocol. Any other header is classified as user-specified
+// metadata.
+func isReservedHeader(hdr string) bool {
+ if hdr != "" && hdr[0] == ':' {
+ return true
+ }
+ switch hdr {
+ case "content-type",
+ "grpc-message-type",
+ "grpc-encoding",
+ "grpc-message",
+ "grpc-status",
+ "grpc-timeout",
+ "te":
+ return true
+ default:
+ return false
+ }
+}
+
+// isWhitelistedPseudoHeader checks whether hdr belongs to the HTTP2
+// pseudo-headers that should be propagated into metadata visible to users.
+func isWhitelistedPseudoHeader(hdr string) bool {
+ switch hdr {
+ case ":authority":
+ return true
+ default:
+ return false
+ }
+}
+
+func (d *decodeState) setErr(err error) {
+ if d.err == nil {
+ d.err = err
+ }
+}
+
+func (d *decodeState) processHeaderField(f hpack.HeaderField) {
+ switch f.Name {
+ case "content-type":
+ if !strings.Contains(f.Value, "application/grpc") {
+ d.setErr(StreamErrorf(codes.FailedPrecondition, "transport: received the unexpected content-type %q", f.Value))
+ return
+ }
+ case "grpc-encoding":
+ d.encoding = f.Value
+ case "grpc-status":
+ code, err := strconv.Atoi(f.Value)
+ if err != nil {
+ d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err))
+ return
+ }
+ d.statusCode = codes.Code(code)
+ case "grpc-message":
+ d.statusDesc = f.Value
+ case "grpc-timeout":
+ d.timeoutSet = true
+ var err error
+ d.timeout, err = timeoutDecode(f.Value)
+ if err != nil {
+ d.setErr(StreamErrorf(codes.Internal, "transport: malformed grpc-timeout: %v", err))
+ return
+ }
+ case ":path":
+ d.method = f.Value
+ default:
+ if !isReservedHeader(f.Name) || isWhitelistedPseudoHeader(f.Name) {
+ if f.Name == "user-agent" {
+ i := strings.LastIndex(f.Value, " ")
+ if i == -1 {
+ // There is no application user agent string being set.
+ return
+ }
+ // Extract the application user agent string.
+ f.Value = f.Value[:i]
+ }
+ if d.mdata == nil {
+ d.mdata = make(map[string][]string)
+ }
+ k, v, err := metadata.DecodeKeyValue(f.Name, f.Value)
+ if err != nil {
+ grpclog.Printf("Failed to decode (%q, %q): %v", f.Name, f.Value, err)
+ return
+ }
+ d.mdata[k] = append(d.mdata[k], v)
+ }
+ }
+}
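+
+// decodeExample is an illustrative sketch (not part of the upstream code):
+// feeding a grpc-status header field into a decodeState caches the stream
+// status, as processHeaderField above shows.
+func decodeExample() codes.Code {
+	d := &decodeState{}
+	d.processHeaderField(hpack.HeaderField{Name: "grpc-status", Value: "0"})
+	return d.statusCode // codes.OK
+}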
+
+type timeoutUnit uint8
+
+const (
+ hour timeoutUnit = 'H'
+ minute timeoutUnit = 'M'
+ second timeoutUnit = 'S'
+ millisecond timeoutUnit = 'm'
+ microsecond timeoutUnit = 'u'
+ nanosecond timeoutUnit = 'n'
+)
+
+func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) {
+ switch u {
+ case hour:
+ return time.Hour, true
+ case minute:
+ return time.Minute, true
+ case second:
+ return time.Second, true
+ case millisecond:
+ return time.Millisecond, true
+ case microsecond:
+ return time.Microsecond, true
+ case nanosecond:
+ return time.Nanosecond, true
+ default:
+ }
+ return
+}
+
+const maxTimeoutValue int64 = 100000000 - 1
+
+// div does integer division, rounding the result up. Note that this is
+// equivalent to (d+r-1)/r but has less chance of overflowing.
+func div(d, r time.Duration) int64 {
+ if m := d % r; m > 0 {
+ return int64(d/r + 1)
+ }
+ return int64(d / r)
+}
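+
+// divExample is an illustrative sketch (not part of the upstream code)
+// demonstrating div's round-up behaviour: 5ms / 2ms rounds up to 3.
+func divExample() int64 {
+	return div(5*time.Millisecond, 2*time.Millisecond) // == 3
+}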
+
+// TODO(zhaoq): This encoding is simplistic and not bandwidth-efficient. Improve it.
+func timeoutEncode(t time.Duration) string {
+ if d := div(t, time.Nanosecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "n"
+ }
+ if d := div(t, time.Microsecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "u"
+ }
+ if d := div(t, time.Millisecond); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "m"
+ }
+ if d := div(t, time.Second); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "S"
+ }
+ if d := div(t, time.Minute); d <= maxTimeoutValue {
+ return strconv.FormatInt(d, 10) + "M"
+ }
+ // Note that maxTimeoutValue * time.Hour > MaxInt64.
+ return strconv.FormatInt(div(t, time.Hour), 10) + "H"
+}
+
+func timeoutDecode(s string) (time.Duration, error) {
+ size := len(s)
+ if size < 2 {
+ return 0, fmt.Errorf("transport: timeout string is too short: %q", s)
+ }
+ unit := timeoutUnit(s[size-1])
+ d, ok := timeoutUnitToDuration(unit)
+ if !ok {
+ return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s)
+ }
+ t, err := strconv.ParseInt(s[:size-1], 10, 64)
+ if err != nil {
+ return 0, err
+ }
+ return d * time.Duration(t), nil
+}
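+
+// timeoutRoundTripExample is an illustrative sketch (not part of the upstream
+// code) of the grpc-timeout wire format: 100ms exceeds maxTimeoutValue in
+// nanoseconds, so it encodes as "100000u" and decodes back to the same duration.
+func timeoutRoundTripExample() (time.Duration, error) {
+	s := timeoutEncode(100 * time.Millisecond) // "100000u"
+	return timeoutDecode(s)                    // 100ms, nil
+}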
+
+type framer struct {
+ numWriters int32
+ reader io.Reader
+ writer *bufio.Writer
+ fr *http2.Framer
+}
+
+func newFramer(conn net.Conn) *framer {
+ f := &framer{
+ reader: bufio.NewReaderSize(conn, http2IOBufSize),
+ writer: bufio.NewWriterSize(conn, http2IOBufSize),
+ }
+ f.fr = http2.NewFramer(f.writer, f.reader)
+ f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil)
+ return f
+}
+
+func (f *framer) adjustNumWriters(i int32) int32 {
+ return atomic.AddInt32(&f.numWriters, i)
+}
+
+// The following writeXXX functions can only be called when the caller has
+// been unblocked from the writableChan channel (i.e., owns the privilege to
+// write).
+
+func (f *framer) writeContinuation(forceFlush bool, streamID uint32, endHeaders bool, headerBlockFragment []byte) error {
+ if err := f.fr.WriteContinuation(streamID, endHeaders, headerBlockFragment); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writeData(forceFlush bool, streamID uint32, endStream bool, data []byte) error {
+ if err := f.fr.WriteData(streamID, endStream, data); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writeGoAway(forceFlush bool, maxStreamID uint32, code http2.ErrCode, debugData []byte) error {
+ if err := f.fr.WriteGoAway(maxStreamID, code, debugData); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writeHeaders(forceFlush bool, p http2.HeadersFrameParam) error {
+ if err := f.fr.WriteHeaders(p); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writePing(forceFlush, ack bool, data [8]byte) error {
+ if err := f.fr.WritePing(ack, data); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writePriority(forceFlush bool, streamID uint32, p http2.PriorityParam) error {
+ if err := f.fr.WritePriority(streamID, p); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writePushPromise(forceFlush bool, p http2.PushPromiseParam) error {
+ if err := f.fr.WritePushPromise(p); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writeRSTStream(forceFlush bool, streamID uint32, code http2.ErrCode) error {
+ if err := f.fr.WriteRSTStream(streamID, code); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writeSettings(forceFlush bool, settings ...http2.Setting) error {
+ if err := f.fr.WriteSettings(settings...); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writeSettingsAck(forceFlush bool) error {
+ if err := f.fr.WriteSettingsAck(); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) writeWindowUpdate(forceFlush bool, streamID, incr uint32) error {
+ if err := f.fr.WriteWindowUpdate(streamID, incr); err != nil {
+ return err
+ }
+ if forceFlush {
+ return f.writer.Flush()
+ }
+ return nil
+}
+
+func (f *framer) flushWrite() error {
+ return f.writer.Flush()
+}
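+
+// batchWriteExample is an illustrative sketch (not part of the upstream code)
+// of the batching pattern the writeXXX helpers enable: queue several frames
+// with forceFlush=false, then flush the buffered writer once.
+func batchWriteExample(f *framer) error {
+	if err := f.writeSettings(false); err != nil { // buffered, not flushed
+		return err
+	}
+	if err := f.writeWindowUpdate(false, 0, 1024); err != nil {
+		return err
+	}
+	return f.flushWrite() // a single flush covers both frames
+}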
+
+func (f *framer) readFrame() (http2.Frame, error) {
+ return f.fr.ReadFrame()
+}
+
+func (f *framer) errorDetail() error {
+ return f.fr.ErrorDetail()
+}
diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go
new file mode 100644
index 0000000..1c9af54
--- /dev/null
+++ b/vendor/google.golang.org/grpc/transport/transport.go
@@ -0,0 +1,513 @@
+/*
+ *
+ * Copyright 2014, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+Package transport defines and implements a message-oriented communication
+channel for completing various transactions (e.g., an RPC).
+*/
+package transport // import "google.golang.org/grpc/transport"
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+ "golang.org/x/net/trace"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/metadata"
+)
+
+// recvMsg represents a message received from the transport. All
+// transport-protocol-specific info has been removed.
+type recvMsg struct {
+ data []byte
+ // nil: received some data
+ // io.EOF: stream is completed. data is nil.
+ // other non-nil error: transport failure. data is nil.
+ err error
+}
+
+func (*recvMsg) item() {}
+
+// All items put into and taken out of a recvBuffer must be of the same type.
+type item interface {
+ item()
+}
+
+// recvBuffer is an unbounded channel of item.
+type recvBuffer struct {
+ c chan item
+ mu sync.Mutex
+ backlog []item
+}
+
+func newRecvBuffer() *recvBuffer {
+ b := &recvBuffer{
+ c: make(chan item, 1),
+ }
+ return b
+}
+
+func (b *recvBuffer) put(r item) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if len(b.backlog) == 0 {
+ select {
+ case b.c <- r:
+ return
+ default:
+ }
+ }
+ b.backlog = append(b.backlog, r)
+}
+
+func (b *recvBuffer) load() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if len(b.backlog) > 0 {
+ select {
+ case b.c <- b.backlog[0]:
+ b.backlog = b.backlog[1:]
+ default:
+ }
+ }
+}
+
+// get returns the channel that receives an item in the buffer.
+//
+// Upon receipt of an item, the caller should call load to send another
+// item onto the channel if there is any.
+func (b *recvBuffer) get() <-chan item {
+ return b.c
+}
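+
+// recvBufferExample is an illustrative sketch (not part of the upstream code)
+// of the put/get/load protocol: after receiving an item from get(), the
+// consumer calls load() to move the next backlogged item onto the channel.
+func recvBufferExample() *recvMsg {
+	b := newRecvBuffer()
+	b.put(&recvMsg{data: []byte("hello")})
+	i := <-b.get()
+	b.load() // promote the next backlogged item, if any
+	return i.(*recvMsg)
+}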
+
+// recvBufferReader implements the io.Reader interface to read data from a
+// recvBuffer.
+type recvBufferReader struct {
+ ctx context.Context
+ recv *recvBuffer
+ last *bytes.Reader // Stores the remaining data in the previous calls.
+ err error
+}
+
+// Read reads the next len(p) bytes from last. If last is drained, it tries to
+// read additional data from recv. It blocks if there is no additional data
+// available in recv. Once Read returns a non-nil error, it keeps returning that error.
+func (r *recvBufferReader) Read(p []byte) (n int, err error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ defer func() { r.err = err }()
+ if r.last != nil && r.last.Len() > 0 {
+ // Read remaining data left in last call.
+ return r.last.Read(p)
+ }
+ select {
+ case <-r.ctx.Done():
+ return 0, ContextErr(r.ctx.Err())
+ case i := <-r.recv.get():
+ r.recv.load()
+ m := i.(*recvMsg)
+ if m.err != nil {
+ return 0, m.err
+ }
+ r.last = bytes.NewReader(m.data)
+ return r.last.Read(p)
+ }
+}
+
+type streamState uint8
+
+const (
+ streamActive streamState = iota
+ streamWriteDone // EndStream sent
+ streamReadDone // EndStream received
+ streamDone // sendDone and recvDone or RSTStreamFrame is sent or received.
+)
+
+// Stream represents an RPC in the transport layer.
+type Stream struct {
+ id uint32
+ // nil for client side Stream.
+ st ServerTransport
+ // ctx is the associated context of the stream.
+ ctx context.Context
+ cancel context.CancelFunc
+ // method records the associated RPC method of the stream.
+ method string
+ recvCompress string
+ sendCompress string
+ buf *recvBuffer
+ dec io.Reader
+ fc *inFlow
+ recvQuota uint32
+ // The accumulated inbound quota pending for window update.
+ updateQuota uint32
+ // The handler to control the window update procedure for both this
+ // particular stream and the associated transport.
+ windowHandler func(int)
+
+ sendQuotaPool *quotaPool
+ // Close headerChan to indicate the end of reception of header metadata.
+ headerChan chan struct{}
+ // header caches the received header metadata.
+ header metadata.MD
+ // The key-value map of trailer metadata.
+ trailer metadata.MD
+
+ mu sync.RWMutex // guard the following
+ // headerOk becomes true once the first header is about to be sent.
+ headerOk bool
+ state streamState
+ // true iff headerChan is closed. Used to avoid closing headerChan
+ // multiple times.
+ headerDone bool
+ // the status received from the server.
+ statusCode codes.Code
+ statusDesc string
+}
+
+// RecvCompress returns the compression algorithm applied to the inbound
+// message. It is the empty string if no compression was applied.
+func (s *Stream) RecvCompress() string {
+ return s.recvCompress
+}
+
+// SetSendCompress sets the compression algorithm for the stream.
+func (s *Stream) SetSendCompress(str string) {
+ s.sendCompress = str
+}
+
+// Header acquires the key-value pairs of header metadata once they are
+// available. It blocks until (i) the metadata is ready, (ii) there is no
+// header metadata, or (iii) the stream is canceled/expired.
+func (s *Stream) Header() (metadata.MD, error) {
+ select {
+ case <-s.ctx.Done():
+ return nil, ContextErr(s.ctx.Err())
+ case <-s.headerChan:
+ return s.header.Copy(), nil
+ }
+}
+
+// Trailer returns the cached trailer metadata. Note that if it is not called
+// after the entire stream is done, it could return an empty MD. Client side
+// only.
+func (s *Stream) Trailer() metadata.MD {
+ s.mu.RLock()
+ defer s.mu.RUnlock()
+ return s.trailer.Copy()
+}
+
+// ServerTransport returns the underlying ServerTransport for the stream.
+// The client side stream always returns nil.
+func (s *Stream) ServerTransport() ServerTransport {
+ return s.st
+}
+
+// Context returns the context of the stream.
+func (s *Stream) Context() context.Context {
+ return s.ctx
+}
+
+// TraceContext recreates the context of s with a trace.Trace.
+func (s *Stream) TraceContext(tr trace.Trace) {
+ s.ctx = trace.NewContext(s.ctx, tr)
+}
+
+// Method returns the method for the stream.
+func (s *Stream) Method() string {
+ return s.method
+}
+
+// StatusCode returns statusCode received from the server.
+func (s *Stream) StatusCode() codes.Code {
+ return s.statusCode
+}
+
+// StatusDesc returns statusDesc received from the server.
+func (s *Stream) StatusDesc() string {
+ return s.statusDesc
+}
+
+// ErrIllegalTrailerSet indicates that the trailer has already been set or it
+// is too late to do so.
+var ErrIllegalTrailerSet = errors.New("transport: trailer has been set")
+
+// SetTrailer sets the trailer metadata which will be sent with the RPC status
+// by the server. It can be called at most once. Server side only.
+func (s *Stream) SetTrailer(md metadata.MD) error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.trailer != nil {
+ return ErrIllegalTrailerSet
+ }
+ s.trailer = md.Copy()
+ return nil
+}
+
+func (s *Stream) write(m recvMsg) {
+ s.buf.put(&m)
+}
+
+// Read reads all the data available for this Stream from the transport and
+// passes it into the decoder, which converts it into a gRPC message stream.
+// The error is io.EOF when the stream is done, or another non-nil error if
+// the stream broke.
+func (s *Stream) Read(p []byte) (n int, err error) {
+ n, err = s.dec.Read(p)
+ if err != nil {
+ return
+ }
+ s.windowHandler(n)
+ return
+}
+
+// streamKey is the key used to store a transport.Stream in a context.
+type streamKey struct{}
+
+// newContextWithStream creates a new context from ctx and attaches stream
+// to it.
+func newContextWithStream(ctx context.Context, stream *Stream) context.Context {
+ return context.WithValue(ctx, streamKey{}, stream)
+}
+
+// StreamFromContext returns the stream saved in ctx.
+func StreamFromContext(ctx context.Context) (s *Stream, ok bool) {
+ s, ok = ctx.Value(streamKey{}).(*Stream)
+ return
+}
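+
+// streamContextExample is an illustrative sketch (not part of the upstream
+// code): a Stream attached with newContextWithStream is recoverable via
+// StreamFromContext.
+func streamContextExample(ctx context.Context, s *Stream) bool {
+	got, ok := StreamFromContext(newContextWithStream(ctx, s))
+	return ok && got == s
+}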
+
+// state of transport
+type transportState int
+
+const (
+ reachable transportState = iota
+ unreachable
+ closing
+ draining
+)
+
+// NewServerTransport creates a ServerTransport from conn, or returns a
+// non-nil error if it fails.
+func NewServerTransport(protocol string, conn net.Conn, maxStreams uint32, authInfo credentials.AuthInfo) (ServerTransport, error) {
+ return newHTTP2Server(conn, maxStreams, authInfo)
+}
+
+// ConnectOptions covers all relevant options for dialing a server.
+type ConnectOptions struct {
+ // UserAgent is the application user agent.
+ UserAgent string
+ // Dialer specifies how to dial a network address.
+ Dialer func(string, time.Duration) (net.Conn, error)
+ // AuthOptions stores the credentials required to set up a client connection and/or issue RPCs.
+ AuthOptions []credentials.Credentials
+ // Timeout specifies the timeout for dialing a client connection.
+ Timeout time.Duration
+}
+
+// NewClientTransport establishes the transport with the required ConnectOptions
+// and returns it to the caller.
+func NewClientTransport(target string, opts *ConnectOptions) (ClientTransport, error) {
+ return newHTTP2Client(target, opts)
+}
+
+// Options provides additional hints and information for message
+// transmission.
+type Options struct {
+ // Last indicates whether this write is the last piece for
+ // this stream.
+ Last bool
+
+ // Delay is a hint to the transport implementation for whether
+ // the data could be buffered for a batching write. The
+ // Transport implementation may ignore the hint.
+ Delay bool
+}
+
+// CallHdr carries the information of a particular RPC.
+type CallHdr struct {
+ // Host specifies the peer's host.
+ Host string
+
+ // Method specifies the operation to perform.
+ Method string
+
+ // RecvCompress specifies the compression algorithm applied on
+ // inbound messages.
+ RecvCompress string
+
+ // SendCompress specifies the compression algorithm applied on
+ // outbound messages.
+ SendCompress string
+
+ // Flush indicates whether a new stream command should be sent
+ // to the peer without waiting for the first data. This is
+ // only a hint. The transport may modify the flush decision
+ // for performance purposes.
+ Flush bool
+}
+
+// ClientTransport is the common interface for all gRPC client-side transport
+// implementations.
+type ClientTransport interface {
+ // Close tears down this transport. Once it returns, the transport
+ // should not be accessed any more. The caller must make sure this
+ // is called only once.
+ Close() error
+
+ // GracefulClose starts to tear down the transport. It stops accepting
+ // new RPCs and waits for the completion of the pending RPCs.
+ GracefulClose() error
+
+ // Write sends the data for the given stream. A nil stream indicates
+ // the write is to be performed on the transport as a whole.
+ Write(s *Stream, data []byte, opts *Options) error
+
+ // NewStream creates a Stream for an RPC.
+ NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
+
+ // CloseStream clears the footprint of a stream when the stream is
+ // not needed any more. The err indicates the error incurred when
+ // CloseStream is called. Must be called when a stream is finished
+ // unless the associated transport is closing.
+ CloseStream(stream *Stream, err error)
+
+ // Error returns a channel that is closed when some I/O error
+ // happens. Typically the caller should have a goroutine to monitor
+ // this in order to take action (e.g., close the current transport
+ // and create a new one) when an error occurs. It should not return
+ // nil once the transport is initiated.
+ Error() <-chan struct{}
+}
+
+// ServerTransport is the common interface for all gRPC server-side transport
+// implementations.
+//
+// Methods may be called concurrently from multiple goroutines, but
+// Write methods for a given Stream will be called serially.
+type ServerTransport interface {
+ // HandleStreams receives incoming streams using the given handler.
+ HandleStreams(func(*Stream))
+
+ // WriteHeader sends the header metadata for the given stream.
+ // WriteHeader may not be called on all streams.
+ WriteHeader(s *Stream, md metadata.MD) error
+
+ // Write sends the data for the given stream.
+ // Write may not be called on all streams.
+ Write(s *Stream, data []byte, opts *Options) error
+
+ // WriteStatus sends the status of a stream to the client.
+ // WriteStatus is the final call made on a stream and always
+ // occurs.
+ WriteStatus(s *Stream, statusCode codes.Code, statusDesc string) error
+
+ // Close tears down the transport. Once it is called, the transport
+ // should not be accessed any more. All the pending streams and their
+ // handlers will be terminated asynchronously.
+ Close() error
+
+ // RemoteAddr returns the remote network address.
+ RemoteAddr() net.Addr
+}
+
+// StreamErrorf creates a StreamError with the specified error code and description.
+func StreamErrorf(c codes.Code, format string, a ...interface{}) StreamError {
+ return StreamError{
+ Code: c,
+ Desc: fmt.Sprintf(format, a...),
+ }
+}
+
+// ConnectionErrorf creates a ConnectionError with the specified error description.
+func ConnectionErrorf(format string, a ...interface{}) ConnectionError {
+ return ConnectionError{
+ Desc: fmt.Sprintf(format, a...),
+ }
+}
+
+// ConnectionError is an error that results in the termination of the
+// entire connection and the retry of all the active streams.
+type ConnectionError struct {
+ Desc string
+}
+
+func (e ConnectionError) Error() string {
+ return fmt.Sprintf("connection error: desc = %q", e.Desc)
+}
+
+// ErrConnClosing indicates that the transport is closing.
+var ErrConnClosing = ConnectionError{Desc: "transport is closing"}
+
+// StreamError is an error that only affects one stream within a connection.
+type StreamError struct {
+ Code codes.Code
+ Desc string
+}
+
+func (e StreamError) Error() string {
+ return fmt.Sprintf("stream error: code = %d desc = %q", e.Code, e.Desc)
+}
+
+// ContextErr converts the error from context package into a StreamError.
+func ContextErr(err error) StreamError {
+ switch err {
+ case context.DeadlineExceeded:
+ return StreamErrorf(codes.DeadlineExceeded, "%v", err)
+ case context.Canceled:
+ return StreamErrorf(codes.Canceled, "%v", err)
+ }
+ panic(fmt.Sprintf("Unexpected error from context packet: %v", err))
+}
+
+// wait blocks until it can receive from ctx.Done, closing, or proceed.
+// If it receives from ctx.Done, it returns 0, the StreamError for ctx.Err.
+// If it receives from closing, it returns 0, ErrConnClosing.
+// If it receives from proceed, it returns the received integer, nil.
+func wait(ctx context.Context, closing <-chan struct{}, proceed <-chan int) (int, error) {
+ select {
+ case <-ctx.Done():
+ return 0, ContextErr(ctx.Err())
+ case <-closing:
+ return 0, ErrConnClosing
+ case i := <-proceed:
+ return i, nil
+ }
+}
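+
+// waitExample is an illustrative sketch (not part of the upstream code) of how
+// callers use wait: block until quota arrives on proceed, or give up when the
+// context is done or the transport is closing.
+func waitExample(ctx context.Context) (int, error) {
+	closing := make(chan struct{})
+	proceed := make(chan int, 1)
+	proceed <- 42
+	return wait(ctx, closing, proceed) // 42, nil (with a live ctx)
+}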
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 9e4593f..75f7e78 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -9,6 +9,138 @@
"revisionTime": "2016-04-26T10:16:13Z"
},
{
+ "checksumSHA1": "zKsZOyaTSC4MmdbKfiM6iUCjvGk=",
+ "path": "github.com/aws/aws-sdk-go/aws",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "AWg3FBA1NTPdIVZipaQf/rGx38o=",
+ "path": "github.com/aws/aws-sdk-go/aws/awserr",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "dkfyy7aRNZ6BmUZ4ZdLIcMMXiPA=",
+ "path": "github.com/aws/aws-sdk-go/aws/awsutil",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "RsYlRfQceaAgqjIrExwNsb/RBEM=",
+ "path": "github.com/aws/aws-sdk-go/aws/client",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "ieAJ+Cvp/PKv1LpUEnUXpc3OI6E=",
+ "path": "github.com/aws/aws-sdk-go/aws/client/metadata",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "gNWirlrTfSLbOe421hISBAhTqa4=",
+ "path": "github.com/aws/aws-sdk-go/aws/corehandlers",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "EiauD48zRlXIFvAENgZ+PXSEnT0=",
+ "path": "github.com/aws/aws-sdk-go/aws/credentials",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "KQiUK/zr3mqnAXD7x/X55/iNme0=",
+ "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "t9z4goehHyiGgU85snZcFogywwk=",
+ "path": "github.com/aws/aws-sdk-go/aws/defaults",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "U0SthWum+t9ACanK7SDJOg3dO6M=",
+ "path": "github.com/aws/aws-sdk-go/aws/ec2metadata",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "1hUf2Q/nSEF1Ee4cnBBica+9C+E=",
+ "path": "github.com/aws/aws-sdk-go/aws/request",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "46SVikiXo5xuy/CS6mM1XVTUU7w=",
+ "path": "github.com/aws/aws-sdk-go/aws/session",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "sgft7A0lRCVD7QBogydg46lr3NM=",
+ "path": "github.com/aws/aws-sdk-go/private/endpoints",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "wk7EyvDaHwb5qqoOP/4d3cV0708=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "nHHyS4+VgZOV7F3Xu87crArmbds=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/query",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "5xzix1R8prUyWxgLnzUQoxTsfik=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "TW/7U+/8ormL7acf6z2rv2hDD+s=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/rest",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "ttxyyPnlmMDqX+sY10BwbwwA+jo=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/restxml",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "LsCIsjbzX2r3n/AhpNJvAC5ueNA=",
+ "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "wZbHPxkyYsr5h6GW5OVh9qIMZR8=",
+ "path": "github.com/aws/aws-sdk-go/private/signer/v4",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "Eo9yODN5U99BK0pMzoqnBm7PCrY=",
+ "path": "github.com/aws/aws-sdk-go/private/waiter",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "BA/gv0KordTClvOxXmhBZTnesqo=",
+ "path": "github.com/aws/aws-sdk-go/service/s3",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
"checksumSHA1": "5rPfda8jFccr3A6heL+JAmi9K9g=",
"origin": "github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew",
"path": "github.com/davecgh/go-spew/spew",
@@ -22,6 +154,19 @@
"revisionTime": "2016-04-12T13:37:56Z"
},
{
+ "checksumSHA1": "FCeEm2BWZV/n4oTy+SGd/k0Ab5c=",
+ "origin": "github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini",
+ "path": "github.com/go-ini/ini",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
+ "checksumSHA1": "r2217M+60ykOkomU6p2mO4pLzug=",
+ "path": "github.com/golang/protobuf/proto",
+ "revision": "dda510ac0fd43b39770f22ac6260eb91d377bce3",
+ "revisionTime": "2016-03-30T00:19:00Z"
+ },
+ {
"checksumSHA1": "D0XN/mcGIua2WyaEXA1I54jRfgY=",
"path": "github.com/google/go-github/github",
"revision": "7ec4e45f77474b3a7b7d9b7ef233a0cc8339ddf3",
@@ -125,6 +270,13 @@
"revisionTime": "2016-04-26T22:13:00Z"
},
{
+ "checksumSHA1": "0ZrwvB6KoGPj2PoDNSEJwxQ6Mog=",
+ "origin": "github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath",
+ "path": "github.com/jmespath/go-jmespath",
+ "revision": "1608a1f6ffe7f327007d2aca50ebf1d26c285ef2",
+ "revisionTime": "2016-06-03T16:17:08Z"
+ },
+ {
"checksumSHA1": "eqs9e0Fw1wlUZCj/ZtrSwnPsJ54=",
"path": "github.com/magiconair/properties",
"revision": "c265cfa48dda6474e208715ca93e987829f572f8",
@@ -180,6 +332,18 @@
"revisionTime": "2016-04-18T22:58:27Z"
},
{
+ "checksumSHA1": "BS9oue0y6JjMzz3spKlMTVmxZxo=",
+ "path": "go4.org/wkfs",
+ "revision": "03efcb870d84809319ea509714dd6d19a1498483",
+ "revisionTime": "2016-03-14T03:18:11Z"
+ },
+ {
+ "checksumSHA1": "b2rJCXYVgHeaVQmeCIJDsq8ljUo=",
+ "path": "go4.org/wkfs/gcs",
+ "revision": "03efcb870d84809319ea509714dd6d19a1498483",
+ "revisionTime": "2016-03-14T03:18:11Z"
+ },
+ {
"checksumSHA1": "h+pFYiRHBogczS8/F1NoN3Ata44=",
"path": "golang.org/x/crypto/curve25519",
"revision": "5bcd134fee4dd1475da17714aac19c0aa0142e2f",
@@ -222,6 +386,36 @@
"revisionTime": "2016-05-21T00:18:04Z"
},
{
+ "checksumSHA1": "yI7CF4wcJKGP6SCWPNUtByEwPKo=",
+ "path": "golang.org/x/net/http2",
+ "revision": "0c607074acd38c5f23d1344dfe74c977464d1257",
+ "revisionTime": "2016-05-21T00:18:04Z"
+ },
+ {
+ "checksumSHA1": "EYNaHp7XdLWRydUCE0amEkKAtgk=",
+ "path": "golang.org/x/net/http2/hpack",
+ "revision": "0c607074acd38c5f23d1344dfe74c977464d1257",
+ "revisionTime": "2016-05-21T00:18:04Z"
+ },
+ {
+ "checksumSHA1": "/k7k6eJDkxXx6K9Zpo/OwNm58XM=",
+ "path": "golang.org/x/net/internal/timeseries",
+ "revision": "0c607074acd38c5f23d1344dfe74c977464d1257",
+ "revisionTime": "2016-05-21T00:18:04Z"
+ },
+ {
+ "checksumSHA1": "yhndhWXMs/VSEDLks4dNyFMQStA=",
+ "path": "golang.org/x/net/lex/httplex",
+ "revision": "0c607074acd38c5f23d1344dfe74c977464d1257",
+ "revisionTime": "2016-05-21T00:18:04Z"
+ },
+ {
+ "checksumSHA1": "WpST9lFOHvWrjDVy0GJNqHe+R3E=",
+ "path": "golang.org/x/net/trace",
+ "revision": "0c607074acd38c5f23d1344dfe74c977464d1257",
+ "revisionTime": "2016-05-21T00:18:04Z"
+ },
+ {
"checksumSHA1": "mktBVED98G2vv+OKcSgtnFVZC1Y=",
"path": "golang.org/x/oauth2",
"revision": "c406a4cc4ba462e5dc2f16225c5bd9488f9cbe10",
@@ -288,6 +482,72 @@
"revisionTime": "2016-05-12T05:45:26Z"
},
{
+ "checksumSHA1": "ZD0lo4eyWr+WoOBg9NA9kD/tS04=",
+ "path": "google.golang.org/api/storage/v1",
+ "revision": "7059a7f3d456870ff1e8424a6b1f62abe5f95f36",
+ "revisionTime": "2016-05-12T05:45:26Z"
+ },
+ {
+ "checksumSHA1": "N3KZEuQ9O1QwJXcCJbe7Czwroo4=",
+ "path": "google.golang.org/appengine",
+ "revision": "7f59a8c76b8594d06044bfe0bcbe475cb2020482",
+ "revisionTime": "2016-05-16T20:30:09Z"
+ },
+ {
+ "checksumSHA1": "sX/mhkTnCygMMb8zew37wpSfnjQ=",
+ "path": "google.golang.org/appengine/internal",
+ "revision": "7f59a8c76b8594d06044bfe0bcbe475cb2020482",
+ "revisionTime": "2016-05-16T20:30:09Z"
+ },
+ {
+ "checksumSHA1": "x6Thdfyasqd68dWZWqzWWeIfAfI=",
+ "path": "google.golang.org/appengine/internal/app_identity",
+ "revision": "7f59a8c76b8594d06044bfe0bcbe475cb2020482",
+ "revisionTime": "2016-05-16T20:30:09Z"
+ },
+ {
+ "checksumSHA1": "TsNO8P0xUlLNyh3Ic/tzSp/fDWM=",
+ "path": "google.golang.org/appengine/internal/base",
+ "revision": "7f59a8c76b8594d06044bfe0bcbe475cb2020482",
+ "revisionTime": "2016-05-16T20:30:09Z"
+ },
+ {
+ "checksumSHA1": "5QsV5oLGSfKZqTCVXP6NRz5T4Tw=",
+ "path": "google.golang.org/appengine/internal/datastore",
+ "revision": "7f59a8c76b8594d06044bfe0bcbe475cb2020482",
+ "revisionTime": "2016-05-16T20:30:09Z"
+ },
+ {
+ "checksumSHA1": "eLZVX1EHLclFtQnjDIszsdyWRHo=",
+ "path": "google.golang.org/appengine/internal/modules",
+ "revision": "7f59a8c76b8594d06044bfe0bcbe475cb2020482",
+ "revisionTime": "2016-05-16T20:30:09Z"
+ },
+ {
+ "checksumSHA1": "a1XY7rz3BieOVqVI2Et6rKiwQCk=",
+ "path": "google.golang.org/appengine/internal/remote_api",
+ "revision": "7f59a8c76b8594d06044bfe0bcbe475cb2020482",
+ "revisionTime": "2016-05-16T20:30:09Z"
+ },
+ {
+ "checksumSHA1": "QtAbHtHmDzcf6vOV9eqlCpKgjiw=",
+ "path": "google.golang.org/appengine/internal/urlfetch",
+ "revision": "7f59a8c76b8594d06044bfe0bcbe475cb2020482",
+ "revisionTime": "2016-05-16T20:30:09Z"
+ },
+ {
+ "checksumSHA1": "akOV9pYnCbcPA8wJUutSQVibdyg=",
+ "path": "google.golang.org/appengine/urlfetch",
+ "revision": "7f59a8c76b8594d06044bfe0bcbe475cb2020482",
+ "revisionTime": "2016-05-16T20:30:09Z"
+ },
+ {
+ "checksumSHA1": "jNZvXfhCPUnOxOura2EJunSkXYQ=",
+ "path": "google.golang.org/cloud",
+ "revision": "4f2a141928415b2748b2277f2a673da68863fb5c",
+ "revisionTime": "2016-05-19T06:28:48Z"
+ },
+ {
"checksumSHA1": "Wp8g9MHRmK8SwcyGVCoGtPx+5Lo=",
"path": "google.golang.org/cloud/compute/metadata",
"revision": "4f2a141928415b2748b2277f2a673da68863fb5c",
@@ -300,6 +560,84 @@
"revisionTime": "2016-05-19T06:28:48Z"
},
{
+ "checksumSHA1": "sYR6mMwX7a3dCYpIWUoe7pZ4yxU=",
+ "path": "google.golang.org/cloud/internal/opts",
+ "revision": "4f2a141928415b2748b2277f2a673da68863fb5c",
+ "revisionTime": "2016-05-19T06:28:48Z"
+ },
+ {
+ "checksumSHA1": "cE4oINk9dRO5CqZe1/sIof0PT+M=",
+ "path": "google.golang.org/cloud/internal/transport",
+ "revision": "4f2a141928415b2748b2277f2a673da68863fb5c",
+ "revisionTime": "2016-05-19T06:28:48Z"
+ },
+ {
+ "checksumSHA1": "0B4pQeLYCwm40eN8ZteBqxcdAFU=",
+ "path": "google.golang.org/cloud/storage",
+ "revision": "4f2a141928415b2748b2277f2a673da68863fb5c",
+ "revisionTime": "2016-05-19T06:28:48Z"
+ },
+ {
+ "checksumSHA1": "mcWmnpJlYRe7FPxRS3WzvHX7020=",
+ "path": "google.golang.org/grpc",
+ "revision": "ce6213360cb8fadd0b08f9513406176ec931c5f7",
+ "revisionTime": "2016-06-03T01:42:51Z"
+ },
+ {
+ "checksumSHA1": "08icuA15HRkdYCt6H+Cs90RPQsY=",
+ "path": "google.golang.org/grpc/codes",
+ "revision": "ce6213360cb8fadd0b08f9513406176ec931c5f7",
+ "revisionTime": "2016-06-03T01:42:51Z"
+ },
+ {
+ "checksumSHA1": "a+C+FoyX7Tc4iNFhBndt/y/iPEg=",
+ "path": "google.golang.org/grpc/credentials",
+ "revision": "ce6213360cb8fadd0b08f9513406176ec931c5f7",
+ "revisionTime": "2016-06-03T01:42:51Z"
+ },
+ {
+ "checksumSHA1": "aKQ51Peys87Nx4MPmO7yLYWPliM=",
+ "path": "google.golang.org/grpc/credentials/oauth",
+ "revision": "ce6213360cb8fadd0b08f9513406176ec931c5f7",
+ "revisionTime": "2016-06-03T01:42:51Z"
+ },
+ {
+ "checksumSHA1": "3Lt5hNAG8qJAYSsNghR5uA1zQns=",
+ "path": "google.golang.org/grpc/grpclog",
+ "revision": "ce6213360cb8fadd0b08f9513406176ec931c5f7",
+ "revisionTime": "2016-06-03T01:42:51Z"
+ },
+ {
+ "checksumSHA1": "T3Q0p8kzvXFnRkMaK/G8mCv6mc0=",
+ "path": "google.golang.org/grpc/internal",
+ "revision": "ce6213360cb8fadd0b08f9513406176ec931c5f7",
+ "revisionTime": "2016-06-03T01:42:51Z"
+ },
+ {
+ "checksumSHA1": "IlzBp7dylW8F4D7eINj8xHt/jMk=",
+ "path": "google.golang.org/grpc/metadata",
+ "revision": "ce6213360cb8fadd0b08f9513406176ec931c5f7",
+ "revisionTime": "2016-06-03T01:42:51Z"
+ },
+ {
+ "checksumSHA1": "4GSUFhOQ0kdFlBH4D5OTeKy78z0=",
+ "path": "google.golang.org/grpc/naming",
+ "revision": "ce6213360cb8fadd0b08f9513406176ec931c5f7",
+ "revisionTime": "2016-06-03T01:42:51Z"
+ },
+ {
+ "checksumSHA1": "3RRoLeH6X2//7tVClOVzxW2bY+E=",
+ "path": "google.golang.org/grpc/peer",
+ "revision": "ce6213360cb8fadd0b08f9513406176ec931c5f7",
+ "revisionTime": "2016-06-03T01:42:51Z"
+ },
+ {
+ "checksumSHA1": "r4Wi6RCeOkFP5SJ99RvAYIcnKbk=",
+ "path": "google.golang.org/grpc/transport",
+ "revision": "ce6213360cb8fadd0b08f9513406176ec931c5f7",
+ "revisionTime": "2016-06-03T01:42:51Z"
+ },
+ {
"checksumSHA1": "+OgOXBoiQ+X+C2dsAeiOHwBIEH0=",
"path": "gopkg.in/yaml.v2",
"revision": "a83829b6f1293c91addabc89d0571c246397bbf4",