Diffstat (limited to 'vendor/google.golang.org/cloud')
-rw-r--r--  vendor/google.golang.org/cloud/AUTHORS                        14
-rw-r--r--  vendor/google.golang.org/cloud/CONTRIBUTING.md               115
-rw-r--r--  vendor/google.golang.org/cloud/CONTRIBUTORS                   29
-rw-r--r--  vendor/google.golang.org/cloud/README.md                     186
-rw-r--r--  vendor/google.golang.org/cloud/cloud.go                       56
-rw-r--r--  vendor/google.golang.org/cloud/internal/opts/option.go        25
-rw-r--r--  vendor/google.golang.org/cloud/internal/transport/dial.go    101
-rw-r--r--  vendor/google.golang.org/cloud/key.json.enc                  bin 0 -> 1248 bytes
-rw-r--r--  vendor/google.golang.org/cloud/option.go                     114
-rw-r--r--  vendor/google.golang.org/cloud/storage/acl.go                204
-rw-r--r--  vendor/google.golang.org/cloud/storage/reader.go              55
-rw-r--r--  vendor/google.golang.org/cloud/storage/storage.go           1026
-rw-r--r--  vendor/google.golang.org/cloud/storage/writer.go             129
13 files changed, 2054 insertions, 0 deletions
diff --git a/vendor/google.golang.org/cloud/AUTHORS b/vendor/google.golang.org/cloud/AUTHORS
new file mode 100644
index 0000000..f92e5cf
--- /dev/null
+++ b/vendor/google.golang.org/cloud/AUTHORS
@@ -0,0 +1,14 @@
+# This is the official list of cloud authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as:
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+Google Inc.
+Ingo Oeser <nightlyone@googlemail.com>
+Palm Stone Games, Inc.
+Paweł Knap <pawelknap88@gmail.com>
+Péter Szilágyi <peterke@gmail.com>
+Tyler Treat <ttreat31@gmail.com>
diff --git a/vendor/google.golang.org/cloud/CONTRIBUTING.md b/vendor/google.golang.org/cloud/CONTRIBUTING.md
new file mode 100644
index 0000000..135a1a1
--- /dev/null
+++ b/vendor/google.golang.org/cloud/CONTRIBUTING.md
@@ -0,0 +1,115 @@
+# Contributing
+
+1. Sign one of the contributor license agreements below.
+1. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
+1. Get the cloud package by running `go get -d google.golang.org/cloud`.
+ 1. If you have already checked out the source, make sure that the remote git
+ origin is https://code.googlesource.com/gocloud:
+
+ git remote set-url origin https://code.googlesource.com/gocloud
+1. Make sure your auth is configured correctly by visiting
+ https://code.googlesource.com, clicking "Generate Password", and following
+ the directions.
+1. Make changes and create a change by running `git codereview change <name>`,
+provide a commit message, and use `git codereview mail` to create a Gerrit CL.
+1. Keep amending the change and mailing as you receive feedback.
+
+## Integration Tests
+
+In addition to the unit tests, you may run the integration test suite.
+
+To run the integration tests, you must create and configure a project in the
+Google Developers Console. Once you have created a project, set the
+following environment variables to be able to run the tests against the actual APIs.
+
+- **GCLOUD_TESTS_GOLANG_PROJECT_ID**: Developers Console project's ID (e.g. bamboo-shift-455)
+- **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
+
+Create a storage bucket with the same name as the project ID set in **GCLOUD_TESTS_GOLANG_PROJECT_ID**.
+The storage integration test will create and delete some objects in this bucket.
+
+Install the [gcloud command-line tool][gcloudcli] on your machine and use it
+to create the indexes used in the datastore integration tests from the index
+definitions found in `datastore/testdata/index.yaml`:
+
+From the project's root directory:
+
+``` sh
+# Set the default project in your env
+$ gcloud config set project $GCLOUD_TESTS_GOLANG_PROJECT_ID
+
+# Authenticate the gcloud tool with your account
+$ gcloud auth login
+
+# Create the indexes
+$ gcloud preview datastore create-indexes datastore/testdata/index.yaml
+
+```
+
+Once you've set the environment variables, you can run the integration tests by
+running:
+
+``` sh
+$ go test -v google.golang.org/cloud/...
+```
+
+## Contributor License Agreements
+
+Before we can accept your pull requests you'll need to sign a Contributor
+License Agreement (CLA):
+
+- **If you are an individual writing original source code** and **you own the
+intellectual property**, then you'll need to sign an [individual CLA][indvcla].
+- **If you work for a company that wants to allow you to contribute your work**,
+then you'll need to sign a [corporate CLA][corpcla].
+
+You can sign these electronically (just scroll to the bottom). After that,
+we'll be able to accept your pull requests.
+
+## Contributor Code of Conduct
+
+As contributors and maintainers of this project,
+and in the interest of fostering an open and welcoming community,
+we pledge to respect all people who contribute through reporting issues,
+posting feature requests, updating documentation,
+submitting pull requests or patches, and other activities.
+
+We are committed to making participation in this project
+a harassment-free experience for everyone,
+regardless of level of experience, gender, gender identity and expression,
+sexual orientation, disability, personal appearance,
+body size, race, ethnicity, age, religion, or nationality.
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery
+* Personal attacks
+* Trolling or insulting/derogatory comments
+* Public or private harassment
+* Publishing others' private information,
+such as physical or electronic addresses,
+without explicit permission
+* Other unethical or unprofessional conduct.
+
+Project maintainers have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions
+that are not aligned to this Code of Conduct.
+By adopting this Code of Conduct,
+project maintainers commit themselves to fairly and consistently
+applying these principles to every aspect of managing this project.
+Project maintainers who do not follow or enforce the Code of Conduct
+may be permanently removed from the project team.
+
+This code of conduct applies both within project spaces and in public spaces
+when an individual is representing the project or its community.
+
+Instances of abusive, harassing, or otherwise unacceptable behavior
+may be reported by opening an issue
+or contacting one or more of the project maintainers.
+
+This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0,
+available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/)
+
+[gcloudcli]: https://developers.google.com/cloud/sdk/gcloud/
+[indvcla]: https://developers.google.com/open-source/cla/individual
+[corpcla]: https://developers.google.com/open-source/cla/corporate
diff --git a/vendor/google.golang.org/cloud/CONTRIBUTORS b/vendor/google.golang.org/cloud/CONTRIBUTORS
new file mode 100644
index 0000000..27db791
--- /dev/null
+++ b/vendor/google.golang.org/cloud/CONTRIBUTORS
@@ -0,0 +1,29 @@
+# People who have agreed to one of the CLAs and can contribute patches.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# https://developers.google.com/open-source/cla/individual
+# https://developers.google.com/open-source/cla/corporate
+#
+# Names should be added to this file as:
+# Name <email address>
+
+# Keep the list alphabetically sorted.
+
+Andrew Gerrand <adg@golang.org>
+Brad Fitzpatrick <bradfitz@golang.org>
+Burcu Dogan <jbd@google.com>
+Dave Day <djd@golang.org>
+David Sansome <me@davidsansome.com>
+David Symonds <dsymonds@golang.org>
+Glenn Lewis <gmlewis@google.com>
+Ingo Oeser <nightlyone@googlemail.com>
+Johan Euphrosine <proppy@google.com>
+Luna Duclos <luna.duclos@palmstonegames.com>
+Michael McGreevy <mcgreevy@golang.org>
+Omar Jarjur <ojarjur@google.com>
+Paweł Knap <pawelknap88@gmail.com>
+Péter Szilágyi <peterke@gmail.com>
+Toby Burress <kurin@google.com>
+Tyler Treat <ttreat31@gmail.com>
diff --git a/vendor/google.golang.org/cloud/README.md b/vendor/google.golang.org/cloud/README.md
new file mode 100644
index 0000000..13b7a0d
--- /dev/null
+++ b/vendor/google.golang.org/cloud/README.md
@@ -0,0 +1,186 @@
+# Google Cloud for Go
+
+[![Build Status](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang.svg?branch=master)](https://travis-ci.org/GoogleCloudPlatform/gcloud-golang)
+[![GoDoc](https://godoc.org/google.golang.org/cloud?status.svg)](https://godoc.org/google.golang.org/cloud)
+
+``` go
+import "google.golang.org/cloud"
+```
+
+**NOTE:** These packages are under development, and may occasionally make
+backwards-incompatible changes.
+
+**NOTE:** This GitHub repo is a mirror of [https://code.googlesource.com/gocloud](https://code.googlesource.com/gocloud).
+
+Go packages for Google Cloud Platform services. Supported APIs are:
+
+Google API | Status | Package
+-------------------------------|--------------|-----------------------------------------------------------
+[Datastore][cloud-datastore] | beta | [`google.golang.org/cloud/datastore`][cloud-datastore-ref]
+[Storage][cloud-storage] | beta | [`google.golang.org/cloud/storage`][cloud-storage-ref]
+[Pub/Sub][cloud-pubsub] | experimental | [`google.golang.org/cloud/pubsub`][cloud-pubsub-ref]
+[BigTable][cloud-bigtable] | stable | [`google.golang.org/cloud/bigtable`][cloud-bigtable-ref]
+[BigQuery][cloud-bigquery] | experimental | [`google.golang.org/cloud/bigquery`][cloud-bigquery-ref]
+[Logging][cloud-logging] | experimental | [`google.golang.org/cloud/logging`][cloud-logging-ref]
+
+> **Experimental status**: the API is still being actively developed. As a
+> result, it might change in backward-incompatible ways and is not recommended
+> for production use.
+>
+> **Beta status**: the API is largely complete, but still has outstanding
+> features and bugs to be addressed. There may be minor backwards-incompatible
+> changes where necessary.
+>
+> **Stable status**: the API is mature and ready for production use. We will
+> continue addressing bugs and feature requests.
+
+Documentation and examples are available at
+https://godoc.org/google.golang.org/cloud
+
+## Authorization
+
+By default, each API will use [Google Application Default Credentials][default-creds]
+for authorization credentials used in calling the API endpoints. This will allow your
+application to run in many environments without requiring explicit configuration.
+
+Manually-configured authorization can be achieved using the
+[`golang.org/x/oauth2`](https://godoc.org/golang.org/x/oauth2) package to
+create an `oauth2.TokenSource`. This token source can be passed to the `NewClient`
+function for the relevant API using a
+[`cloud.WithTokenSource`](https://godoc.org/google.golang.org/cloud#WithTokenSource)
+option.
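+
+For example, a minimal sketch (the service account key file path is
+hypothetical) that builds a token source from a JSON key and passes it to the
+storage client:
+
+```go
+jsonKey, err := ioutil.ReadFile("/path/to/key.json") // hypothetical path
+if err != nil {
+	log.Fatal(err)
+}
+conf, err := google.JWTConfigFromJSON(jsonKey, storage.ScopeReadOnly)
+if err != nil {
+	log.Fatal(err)
+}
+client, err := storage.NewClient(ctx, cloud.WithTokenSource(conf.TokenSource(ctx)))
+if err != nil {
+	log.Fatal(err)
+}
+```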
+
+## Google Cloud Datastore
+
+[Google Cloud Datastore][cloud-datastore] ([docs][cloud-datastore-docs]) is a
+fully managed, schemaless database for storing non-relational data. Cloud Datastore
+automatically scales with your users and supports ACID transactions, high availability
+of reads and writes, strong consistency for reads and ancestor queries, and eventual
+consistency for all other queries.
+
+Follow the [activation instructions][cloud-datastore-activation] to use the Google
+Cloud Datastore API with your project.
+
+https://godoc.org/google.golang.org/cloud/datastore
+
+First create a `datastore.Client` to use throughout your application:
+
+```go
+client, err := datastore.NewClient(ctx, "my-project-id")
+if err != nil {
+ log.Fatalln(err)
+}
+```
+
+Then use that client to interact with the API:
+
+```go
+type Post struct {
+ Title string
+ Body string `datastore:",noindex"`
+ PublishedAt time.Time
+}
+keys := []*datastore.Key{
+ datastore.NewKey(ctx, "Post", "post1", 0, nil),
+ datastore.NewKey(ctx, "Post", "post2", 0, nil),
+}
+posts := []*Post{
+ {Title: "Post 1", Body: "...", PublishedAt: time.Now()},
+ {Title: "Post 2", Body: "...", PublishedAt: time.Now()},
+}
+if _, err := client.PutMulti(ctx, keys, posts); err != nil {
+ log.Fatal(err)
+}
+```
+
+## Google Cloud Storage
+
+[Google Cloud Storage][cloud-storage] ([docs][cloud-storage-docs]) allows you to store
+data on Google infrastructure with very high reliability, performance and availability,
+and can be used to distribute large data objects to users via direct download.
+
+https://godoc.org/google.golang.org/cloud/storage
+
+First create a `storage.Client` to use throughout your application:
+
+```go
+client, err := storage.NewClient(ctx)
+if err != nil {
+ log.Fatal(err)
+}
+```
+
+```go
+// Read the object1 from bucket.
+rc, err := client.Bucket("bucket").Object("object1").NewReader(ctx)
+if err != nil {
+ log.Fatal(err)
+}
+defer rc.Close()
+body, err := ioutil.ReadAll(rc)
+if err != nil {
+ log.Fatal(err)
+}
+```
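+
+Writing follows the same pattern. The sketch below is based on the `NewWriter`
+documentation in the `storage` package; the bucket and object names are
+illustrative:
+
+```go
+// Write "hello world" to object1 in bucket.
+wc := client.Bucket("bucket").Object("object1").NewWriter(ctx)
+wc.ObjectAttrs.ContentType = "text/plain"
+if _, err := wc.Write([]byte("hello world")); err != nil {
+	log.Fatal(err)
+}
+// The new content is not visible until Close returns without error.
+if err := wc.Close(); err != nil {
+	log.Fatal(err)
+}
+```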
+
+## Google Cloud Pub/Sub
+
+[Google Cloud Pub/Sub][cloud-pubsub] ([docs][cloud-pubsub-docs]) allows you to connect
+your services with reliable, many-to-many, asynchronous messaging hosted on Google's
+infrastructure. Cloud Pub/Sub automatically scales as you need it and provides a foundation
+for building your own robust, global services.
+
+https://godoc.org/google.golang.org/cloud/pubsub
+
+
+```go
+// Publish "hello world" on topic1.
+msgIDs, err := pubsub.Publish(ctx, "topic1", &pubsub.Message{
+ Data: []byte("hello world"),
+})
+if err != nil {
+ log.Println(err)
+}
+// Pull messages via subscription1.
+msgs, err := pubsub.Pull(ctx, "subscription1", 1)
+if err != nil {
+ log.Println(err)
+}
+```
+
+## Contributing
+
+Contributions are welcome. Please see the
+[CONTRIBUTING](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md)
+document for details. We use Gerrit for our code reviews. Please don't open pull
+requests against this repo; new pull requests will be automatically closed.
+
+Please note that this project is released with a Contributor Code of Conduct.
+By participating in this project you agree to abide by its terms.
+See [Contributor Code of Conduct](https://github.com/GoogleCloudPlatform/gcloud-golang/blob/master/CONTRIBUTING.md#contributor-code-of-conduct)
+for more information.
+
+[cloud-datastore]: https://cloud.google.com/datastore/
+[cloud-datastore-ref]: https://godoc.org/google.golang.org/cloud/datastore
+[cloud-datastore-docs]: https://cloud.google.com/datastore/docs
+[cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate
+
+[cloud-pubsub]: https://cloud.google.com/pubsub/
+[cloud-pubsub-ref]: https://godoc.org/google.golang.org/cloud/pubsub
+[cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
+
+[cloud-storage]: https://cloud.google.com/storage/
+[cloud-storage-ref]: https://godoc.org/google.golang.org/cloud/storage
+[cloud-storage-docs]: https://cloud.google.com/storage/docs/overview
+[cloud-storage-create-bucket]: https://cloud.google.com/storage/docs/cloud-console#_creatingbuckets
+
+[cloud-bigtable]: https://cloud.google.com/bigtable/
+[cloud-bigtable-ref]: https://godoc.org/google.golang.org/cloud/bigtable
+
+[cloud-bigquery]: https://cloud.google.com/bigquery/
+[cloud-bigquery-ref]: https://godoc.org/google.golang.org/cloud/bigquery
+
+[cloud-logging]: https://cloud.google.com/logging/
+[cloud-logging-ref]: https://godoc.org/google.golang.org/cloud/logging
+
+[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
diff --git a/vendor/google.golang.org/cloud/cloud.go b/vendor/google.golang.org/cloud/cloud.go
new file mode 100644
index 0000000..98be1f4
--- /dev/null
+++ b/vendor/google.golang.org/cloud/cloud.go
@@ -0,0 +1,56 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package cloud contains types and common functions related to the
+// Google Cloud Platform APIs.
+package cloud // import "google.golang.org/cloud"
+
+import (
+ "net/http"
+
+ "golang.org/x/net/context"
+ "google.golang.org/cloud/internal"
+)
+
+// NewContext returns a new context that uses the provided http.Client.
+// The provided http.Client is responsible for authorizing and authenticating
+// the requests made to the Google Cloud APIs.
+// It mutates the client's original Transport to append the cloud
+// package's user-agent to the outgoing requests.
+// You can obtain the project ID from the Google Developers Console,
+// https://console.developers.google.com.
+func NewContext(projID string, c *http.Client) context.Context {
+ if c == nil {
+ panic("invalid nil *http.Client passed to NewContext")
+ }
+ return WithContext(context.Background(), projID, c)
+}
+
+// WithContext returns a new context in a similar way NewContext does,
+// but initiates the new context with the specified parent.
+func WithContext(parent context.Context, projID string, c *http.Client) context.Context {
+ // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does.
+ // Do User-Agent some other way.
+ if c == nil {
+ panic("invalid nil *http.Client passed to WithContext")
+ }
+ if _, ok := c.Transport.(*internal.Transport); !ok {
+ base := c.Transport
+ if base == nil {
+ base = http.DefaultTransport
+ }
+ c.Transport = &internal.Transport{Base: base}
+ }
+ return internal.WithContext(parent, projID, c)
+}
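+
+// Illustrative usage sketch (assumes golang.org/x/oauth2/google and a
+// placeholder project ID; not an upstream example):
+//
+//	hc, err := google.DefaultClient(ctx, "https://www.googleapis.com/auth/devstorage.read_only")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	cctx := cloud.NewContext("my-project-id", hc)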
diff --git a/vendor/google.golang.org/cloud/internal/opts/option.go b/vendor/google.golang.org/cloud/internal/opts/option.go
new file mode 100644
index 0000000..844d310
--- /dev/null
+++ b/vendor/google.golang.org/cloud/internal/opts/option.go
@@ -0,0 +1,25 @@
+// Package opts holds the DialOpts struct, configurable by
+// cloud.ClientOptions to set up transports for cloud packages.
+//
+// This is a separate package to prevent cycles between the core
+// cloud packages.
+package opts
+
+import (
+ "net/http"
+
+ "golang.org/x/oauth2"
+ "google.golang.org/grpc"
+)
+
+type DialOpt struct {
+ Endpoint string
+ Scopes []string
+ UserAgent string
+
+ TokenSource oauth2.TokenSource
+
+ HTTPClient *http.Client
+ GRPCClient *grpc.ClientConn
+ GRPCDialOpts []grpc.DialOption
+}
diff --git a/vendor/google.golang.org/cloud/internal/transport/dial.go b/vendor/google.golang.org/cloud/internal/transport/dial.go
new file mode 100644
index 0000000..a0f8bd9
--- /dev/null
+++ b/vendor/google.golang.org/cloud/internal/transport/dial.go
@@ -0,0 +1,101 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ "google.golang.org/cloud"
+ "google.golang.org/cloud/internal/opts"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/oauth"
+)
+
+// ErrHTTP is returned on a non-200 HTTP response.
+type ErrHTTP struct {
+ StatusCode int
+ Body []byte
+ err error
+}
+
+func (e *ErrHTTP) Error() string {
+ if e.err == nil {
+ return fmt.Sprintf("error during call, http status code: %v %s", e.StatusCode, e.Body)
+ }
+ return e.err.Error()
+}
+
+// NewHTTPClient returns an HTTP client for use communicating with a Google cloud
+// service, configured with the given ClientOptions. It also returns the endpoint
+// for the service as specified in the options.
+func NewHTTPClient(ctx context.Context, opt ...cloud.ClientOption) (*http.Client, string, error) {
+ var o opts.DialOpt
+ for _, opt := range opt {
+ opt.Resolve(&o)
+ }
+ if o.GRPCClient != nil {
+ return nil, "", errors.New("unsupported GRPC base transport specified")
+ }
+ // TODO(djd): Wrap all http.Clients with appropriate internal version to add
+ // UserAgent header and prepend correct endpoint.
+ if o.HTTPClient != nil {
+ return o.HTTPClient, o.Endpoint, nil
+ }
+ if o.TokenSource == nil {
+ var err error
+ o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
+ if err != nil {
+ return nil, "", fmt.Errorf("google.DefaultTokenSource: %v", err)
+ }
+ }
+ return oauth2.NewClient(ctx, o.TokenSource), o.Endpoint, nil
+}
+
+// DialGRPC returns a GRPC connection for use communicating with a Google cloud
+// service, configured with the given ClientOptions.
+func DialGRPC(ctx context.Context, opt ...cloud.ClientOption) (*grpc.ClientConn, error) {
+ var o opts.DialOpt
+ for _, opt := range opt {
+ opt.Resolve(&o)
+ }
+ if o.HTTPClient != nil {
+ return nil, errors.New("unsupported HTTP base transport specified")
+ }
+ if o.GRPCClient != nil {
+ return o.GRPCClient, nil
+ }
+ if o.TokenSource == nil {
+ var err error
+ o.TokenSource, err = google.DefaultTokenSource(ctx, o.Scopes...)
+ if err != nil {
+ return nil, fmt.Errorf("google.DefaultTokenSource: %v", err)
+ }
+ }
+ grpcOpts := []grpc.DialOption{
+ grpc.WithPerRPCCredentials(oauth.TokenSource{o.TokenSource}),
+ grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(nil, "")),
+ }
+ grpcOpts = append(grpcOpts, o.GRPCDialOpts...)
+ if o.UserAgent != "" {
+ grpcOpts = append(grpcOpts, grpc.WithUserAgent(o.UserAgent))
+ }
+ return grpc.Dial(o.Endpoint, grpcOpts...)
+}
diff --git a/vendor/google.golang.org/cloud/key.json.enc b/vendor/google.golang.org/cloud/key.json.enc
new file mode 100644
index 0000000..2f673a8
--- /dev/null
+++ b/vendor/google.golang.org/cloud/key.json.enc
Binary files differ
diff --git a/vendor/google.golang.org/cloud/option.go b/vendor/google.golang.org/cloud/option.go
new file mode 100644
index 0000000..8a443b4
--- /dev/null
+++ b/vendor/google.golang.org/cloud/option.go
@@ -0,0 +1,114 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package cloud
+
+import (
+ "net/http"
+
+ "golang.org/x/oauth2"
+ "google.golang.org/cloud/internal/opts"
+ "google.golang.org/grpc"
+)
+
+// ClientOption is used when constructing clients for each cloud service.
+type ClientOption interface {
+ // Resolve configures the given DialOpts for this option.
+ Resolve(*opts.DialOpt)
+}
+
+// WithTokenSource returns a ClientOption that specifies an OAuth2 token
+// source to be used as the basis for authentication.
+func WithTokenSource(s oauth2.TokenSource) ClientOption {
+ return withTokenSource{s}
+}
+
+type withTokenSource struct{ ts oauth2.TokenSource }
+
+func (w withTokenSource) Resolve(o *opts.DialOpt) {
+ o.TokenSource = w.ts
+}
+
+// WithEndpoint returns a ClientOption that overrides the default endpoint
+// to be used for a service.
+func WithEndpoint(url string) ClientOption {
+ return withEndpoint(url)
+}
+
+type withEndpoint string
+
+func (w withEndpoint) Resolve(o *opts.DialOpt) {
+ o.Endpoint = string(w)
+}
+
+// WithScopes returns a ClientOption that overrides the default OAuth2 scopes
+// to be used for a service.
+func WithScopes(scope ...string) ClientOption {
+ return withScopes(scope)
+}
+
+type withScopes []string
+
+func (w withScopes) Resolve(o *opts.DialOpt) {
+ s := make([]string, len(w))
+ copy(s, w)
+ o.Scopes = s
+}
+
+// WithUserAgent returns a ClientOption that sets the User-Agent.
+func WithUserAgent(ua string) ClientOption {
+ return withUA(ua)
+}
+
+type withUA string
+
+func (w withUA) Resolve(o *opts.DialOpt) { o.UserAgent = string(w) }
+
+// WithBaseHTTP returns a ClientOption that specifies the HTTP client to
+// use as the basis of communications. This option may only be used with
+// services that support HTTP as their communication transport.
+func WithBaseHTTP(client *http.Client) ClientOption {
+ return withBaseHTTP{client}
+}
+
+type withBaseHTTP struct{ client *http.Client }
+
+func (w withBaseHTTP) Resolve(o *opts.DialOpt) {
+ o.HTTPClient = w.client
+}
+
+// WithBaseGRPC returns a ClientOption that specifies the gRPC client
+// connection to use as the basis of communications. This option may only be
+// used with services that support gRPC as their communication transport.
+func WithBaseGRPC(client *grpc.ClientConn) ClientOption {
+ return withBaseGRPC{client}
+}
+
+type withBaseGRPC struct{ client *grpc.ClientConn }
+
+func (w withBaseGRPC) Resolve(o *opts.DialOpt) {
+ o.GRPCClient = w.client
+}
+
+// WithGRPCDialOption returns a ClientOption that appends a new grpc.DialOption
+// to an underlying gRPC dial. It does not work with WithBaseGRPC.
+func WithGRPCDialOption(opt grpc.DialOption) ClientOption {
+ return withGRPCDialOption{opt}
+}
+
+type withGRPCDialOption struct{ opt grpc.DialOption }
+
+func (w withGRPCDialOption) Resolve(o *opts.DialOpt) {
+ o.GRPCDialOpts = append(o.GRPCDialOpts, w.opt)
+}
diff --git a/vendor/google.golang.org/cloud/storage/acl.go b/vendor/google.golang.org/cloud/storage/acl.go
new file mode 100644
index 0000000..1c7be32
--- /dev/null
+++ b/vendor/google.golang.org/cloud/storage/acl.go
@@ -0,0 +1,204 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "fmt"
+
+ "golang.org/x/net/context"
+ raw "google.golang.org/api/storage/v1"
+)
+
+// ACLRole is the level of access to grant.
+type ACLRole string
+
+const (
+ RoleOwner ACLRole = "OWNER"
+ RoleReader ACLRole = "READER"
+)
+
+// ACLEntity refers to a user or group.
+// They are sometimes referred to as grantees.
+//
+// It could be in the form of:
+// "user-<userId>", "user-<email>", "group-<groupId>", "group-<email>",
+// "domain-<domain>" and "project-team-<projectId>".
+//
+// Or one of the predefined constants: AllUsers, AllAuthenticatedUsers.
+type ACLEntity string
+
+const (
+ AllUsers ACLEntity = "allUsers"
+ AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
+)
+
+// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
+type ACLRule struct {
+ Entity ACLEntity
+ Role ACLRole
+}
+
+// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
+type ACLHandle struct {
+ c *Client
+ bucket string
+ object string
+ isDefault bool
+}
+
+// Delete permanently deletes the ACL entry for the given entity.
+func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
+ if a.object != "" {
+ return a.objectDelete(ctx, entity)
+ }
+ if a.isDefault {
+ return a.bucketDefaultDelete(ctx, entity)
+ }
+ return a.bucketDelete(ctx, entity)
+}
+
+// Set sets the permission level for the given entity.
+func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
+ if a.object != "" {
+ return a.objectSet(ctx, entity, role)
+ }
+ if a.isDefault {
+ return a.bucketDefaultSet(ctx, entity, role)
+ }
+ return a.bucketSet(ctx, entity, role)
+}
+
+// List retrieves ACL entries.
+func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) {
+ if a.object != "" {
+ return a.objectList(ctx)
+ }
+ if a.isDefault {
+ return a.bucketDefaultList(ctx)
+ }
+ return a.bucketList(ctx)
+}
+
+func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
+ acls, err := a.c.raw.DefaultObjectAccessControls.List(a.bucket).Context(ctx).Do()
+ if err != nil {
+ return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err)
+ }
+ r := make([]ACLRule, 0, len(acls.Items))
+ for _, v := range acls.Items {
+ if m, ok := v.(map[string]interface{}); ok {
+ entity, ok1 := m["entity"].(string)
+ role, ok2 := m["role"].(string)
+ if ok1 && ok2 {
+ r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)})
+ }
+ }
+ }
+ return r, nil
+}
+
+func (a *ACLHandle) bucketDefaultSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
+ acl := &raw.ObjectAccessControl{
+ Bucket: a.bucket,
+ Entity: string(entity),
+ Role: string(role),
+ }
+ _, err := a.c.raw.DefaultObjectAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do()
+ if err != nil {
+ return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
+ }
+ return nil
+}
+
+func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
+ err := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do()
+ if err != nil {
+ return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
+ }
+ return nil
+}
+
+func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
+ acls, err := a.c.raw.BucketAccessControls.List(a.bucket).Context(ctx).Do()
+ if err != nil {
+ return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err)
+ }
+ r := make([]ACLRule, len(acls.Items))
+ for i, v := range acls.Items {
+ r[i].Entity = ACLEntity(v.Entity)
+ r[i].Role = ACLRole(v.Role)
+ }
+ return r, nil
+}
+
+func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
+ acl := &raw.BucketAccessControl{
+ Bucket: a.bucket,
+ Entity: string(entity),
+ Role: string(role),
+ }
+ _, err := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl).Context(ctx).Do()
+ if err != nil {
+ return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
+ }
+ return nil
+}
+
+func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
+ err := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity)).Context(ctx).Do()
+ if err != nil {
+ return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
+ }
+ return nil
+}
+
+func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
+ acls, err := a.c.raw.ObjectAccessControls.List(a.bucket, a.object).Context(ctx).Do()
+ if err != nil {
+ return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err)
+ }
+ r := make([]ACLRule, 0, len(acls.Items))
+ for _, v := range acls.Items {
+ if m, ok := v.(map[string]interface{}); ok {
+ entity, ok1 := m["entity"].(string)
+ role, ok2 := m["role"].(string)
+ if ok1 && ok2 {
+ r = append(r, ACLRule{Entity: ACLEntity(entity), Role: ACLRole(role)})
+ }
+ }
+ }
+ return r, nil
+}
+
+func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
+ acl := &raw.ObjectAccessControl{
+ Bucket: a.bucket,
+ Entity: string(entity),
+ Role: string(role),
+ }
+ _, err := a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl).Context(ctx).Do()
+ if err != nil {
+ return fmt.Errorf("storage: error updating object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
+ }
+ return nil
+}
+
+func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
+ err := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity)).Context(ctx).Do()
+ if err != nil {
+ return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
+ }
+ return nil
+}
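+
+// Illustrative sketch (assumes a *Client named client, a context named ctx,
+// and placeholder bucket/object names): grant read access to all users on an
+// object, then list the bucket's default object ACL.
+//
+//	acl := client.Bucket("my-bucket").Object("my-object").ACL()
+//	if err := acl.Set(ctx, storage.AllUsers, storage.RoleReader); err != nil {
+//		log.Fatal(err)
+//	}
+//	rules, err := client.Bucket("my-bucket").DefaultObjectACL().List(ctx)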
diff --git a/vendor/google.golang.org/cloud/storage/reader.go b/vendor/google.golang.org/cloud/storage/reader.go
new file mode 100644
index 0000000..9e21648
--- /dev/null
+++ b/vendor/google.golang.org/cloud/storage/reader.go
@@ -0,0 +1,55 @@
+// Copyright 2016 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "io"
+)
+
+// Reader reads a Cloud Storage object.
+type Reader struct {
+ body io.ReadCloser
+ remain, size int64
+ contentType string
+}
+
+func (r *Reader) Close() error {
+ return r.body.Close()
+}
+
+func (r *Reader) Read(p []byte) (int, error) {
+ n, err := r.body.Read(p)
+ if r.remain != -1 {
+ r.remain -= int64(n)
+ }
+ return n, err
+}
+
+// Size returns the size of the object in bytes.
+// The returned value is always the same and is not affected by
+// calls to Read or Close.
+func (r *Reader) Size() int64 {
+ return r.size
+}
+
+// Remain returns the number of bytes left to read, or -1 if unknown.
+func (r *Reader) Remain() int64 {
+ return r.remain
+}
+
+// ContentType returns the content type of the object.
+func (r *Reader) ContentType() string {
+ return r.contentType
+}
diff --git a/vendor/google.golang.org/cloud/storage/storage.go b/vendor/google.golang.org/cloud/storage/storage.go
new file mode 100644
index 0000000..75f0e55
--- /dev/null
+++ b/vendor/google.golang.org/cloud/storage/storage.go
@@ -0,0 +1,1026 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package storage contains a Google Cloud Storage client.
+//
+// This package is experimental and may make backwards-incompatible changes.
+package storage // import "google.golang.org/cloud/storage"
+
+import (
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "crypto/x509"
+ "encoding/base64"
+ "encoding/pem"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "reflect"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "google.golang.org/cloud"
+ "google.golang.org/cloud/internal/transport"
+
+ "golang.org/x/net/context"
+ "google.golang.org/api/googleapi"
+ raw "google.golang.org/api/storage/v1"
+)
+
+var (
+ ErrBucketNotExist = errors.New("storage: bucket doesn't exist")
+ ErrObjectNotExist = errors.New("storage: object doesn't exist")
+)
+
+const userAgent = "gcloud-golang-storage/20151204"
+
+const (
+ // ScopeFullControl grants permissions to manage your
+ // data and permissions in Google Cloud Storage.
+ ScopeFullControl = raw.DevstorageFullControlScope
+
+ // ScopeReadOnly grants permissions to
+ // view your data in Google Cloud Storage.
+ ScopeReadOnly = raw.DevstorageReadOnlyScope
+
+ // ScopeReadWrite grants permissions to manage your
+ // data in Google Cloud Storage.
+ ScopeReadWrite = raw.DevstorageReadWriteScope
+)
+
+// AdminClient is a client type for performing admin operations on a project's
+// buckets.
+type AdminClient struct {
+ hc *http.Client
+ raw *raw.Service
+ projectID string
+}
+
+// NewAdminClient creates a new AdminClient for a given project.
+func NewAdminClient(ctx context.Context, projectID string, opts ...cloud.ClientOption) (*AdminClient, error) {
+ c, err := NewClient(ctx, opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &AdminClient{
+ hc: c.hc,
+ raw: c.raw,
+ projectID: projectID,
+ }, nil
+}
+
+// Close closes the AdminClient.
+func (c *AdminClient) Close() error {
+ c.hc = nil
+ return nil
+}
+
+// CreateBucket creates a bucket in the project.
+// If attrs is nil, the API defaults will be used.
+func (c *AdminClient) CreateBucket(ctx context.Context, bucketName string, attrs *BucketAttrs) error {
+ var bkt *raw.Bucket
+ if attrs != nil {
+ bkt = attrs.toRawBucket()
+ } else {
+ bkt = &raw.Bucket{}
+ }
+ bkt.Name = bucketName
+ req := c.raw.Buckets.Insert(c.projectID, bkt)
+ _, err := req.Context(ctx).Do()
+ return err
+}
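+
+// Illustrative sketch (placeholder project and bucket names; "EU" is just an
+// example location):
+//
+//	admin, err := storage.NewAdminClient(ctx, "my-project-id")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	err = admin.CreateBucket(ctx, "my-new-bucket", &storage.BucketAttrs{Location: "EU"})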
+
+// DeleteBucket deletes the named bucket in the project.
+func (c *AdminClient) DeleteBucket(ctx context.Context, bucketName string) error {
+ req := c.raw.Buckets.Delete(bucketName)
+ return req.Context(ctx).Do()
+}
+
+// Client is a client for interacting with Google Cloud Storage.
+type Client struct {
+ hc *http.Client
+ raw *raw.Service
+}
+
+// NewClient creates a new Google Cloud Storage client.
+// The default scope is ScopeFullControl. To use a different scope, like ScopeReadOnly, use cloud.WithScopes.
+func NewClient(ctx context.Context, opts ...cloud.ClientOption) (*Client, error) {
+ o := []cloud.ClientOption{
+ cloud.WithScopes(ScopeFullControl),
+ cloud.WithUserAgent(userAgent),
+ }
+ opts = append(o, opts...)
+ hc, _, err := transport.NewHTTPClient(ctx, opts...)
+ if err != nil {
+ return nil, fmt.Errorf("dialing: %v", err)
+ }
+ rawService, err := raw.New(hc)
+ if err != nil {
+ return nil, fmt.Errorf("storage client: %v", err)
+ }
+ return &Client{
+ hc: hc,
+ raw: rawService,
+ }, nil
+}
+
+// Close closes the Client.
+func (c *Client) Close() error {
+ c.hc = nil
+ return nil
+}
+
+// BucketHandle provides operations on a Google Cloud Storage bucket.
+// Use Client.Bucket to get a handle.
+type BucketHandle struct {
+ acl *ACLHandle
+ defaultObjectACL *ACLHandle
+
+ c *Client
+ name string
+}
+
+// Bucket returns a BucketHandle, which provides operations on the named bucket.
+// This call does not perform any network operations.
+//
+// name must contain only lowercase letters, numbers, dashes, underscores, and
+// dots. The full specification for valid bucket names can be found at:
+// https://cloud.google.com/storage/docs/bucket-naming
+func (c *Client) Bucket(name string) *BucketHandle {
+ return &BucketHandle{
+ c: c,
+ name: name,
+ acl: &ACLHandle{
+ c: c,
+ bucket: name,
+ },
+ defaultObjectACL: &ACLHandle{
+ c: c,
+ bucket: name,
+ isDefault: true,
+ },
+ }
+}
+
+// ACL returns an ACLHandle, which provides access to the bucket's access control list.
+// This controls who can list, create or overwrite the objects in a bucket.
+// This call does not perform any network operations.
+func (c *BucketHandle) ACL() *ACLHandle {
+ return c.acl
+}
+
+// DefaultObjectACL returns an ACLHandle, which provides access to the bucket's default object ACLs.
+// These ACLs are applied to newly created objects in this bucket that do not have a defined ACL.
+// This call does not perform any network operations.
+func (c *BucketHandle) DefaultObjectACL() *ACLHandle {
+ return c.defaultObjectACL
+}
+
+// Object returns an ObjectHandle, which provides operations on the named object.
+// This call does not perform any network operations.
+//
+// name must consist entirely of valid UTF-8-encoded runes. The full specification
+// for valid object names can be found at:
+// https://cloud.google.com/storage/docs/bucket-naming
+func (b *BucketHandle) Object(name string) *ObjectHandle {
+ return &ObjectHandle{
+ c: b.c,
+ bucket: b.name,
+ object: name,
+ acl: &ACLHandle{
+ c: b.c,
+ bucket: b.name,
+ object: name,
+ },
+ }
+}
+
+// TODO(jbd): Add storage.buckets.list.
+// TODO(jbd): Add storage.buckets.update.
+
+// TODO(jbd): Add storage.objects.watch.
+
+// Attrs returns the metadata for the bucket.
+func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
+ resp, err := b.c.raw.Buckets.Get(b.name).Projection("full").Context(ctx).Do()
+ if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+ return nil, ErrBucketNotExist
+ }
+ if err != nil {
+ return nil, err
+ }
+ return newBucket(resp), nil
+}
+
+// List lists objects from the bucket. You can specify a query
+// to filter the results. If q is nil, no filtering is applied.
+func (b *BucketHandle) List(ctx context.Context, q *Query) (*ObjectList, error) {
+ req := b.c.raw.Objects.List(b.name)
+ req.Projection("full")
+ if q != nil {
+ req.Delimiter(q.Delimiter)
+ req.Prefix(q.Prefix)
+ req.Versions(q.Versions)
+ req.PageToken(q.Cursor)
+ if q.MaxResults > 0 {
+ req.MaxResults(int64(q.MaxResults))
+ }
+ }
+ resp, err := req.Context(ctx).Do()
+ if err != nil {
+ return nil, err
+ }
+ objects := &ObjectList{
+ Results: make([]*ObjectAttrs, len(resp.Items)),
+ Prefixes: make([]string, len(resp.Prefixes)),
+ }
+ for i, item := range resp.Items {
+ objects.Results[i] = newObject(item)
+ }
+ for i, prefix := range resp.Prefixes {
+ objects.Prefixes[i] = prefix
+ }
+ if resp.NextPageToken != "" {
+ next := Query{}
+ if q != nil {
+ // keep the other filtering
+ // criteria if there is a query
+ next = *q
+ }
+ next.Cursor = resp.NextPageToken
+ objects.Next = &next
+ }
+ return objects, nil
+}
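+
+// Illustrative pagination sketch (assumes a *BucketHandle named bkt, a context
+// named ctx, and a hypothetical process function): follow ObjectList.Next
+// until it is nil.
+//
+//	query := &storage.Query{Prefix: "logs/"}
+//	for query != nil {
+//		objs, err := bkt.List(ctx, query)
+//		if err != nil {
+//			log.Fatal(err)
+//		}
+//		for _, obj := range objs.Results {
+//			process(obj) // hypothetical helper
+//		}
+//		query = objs.Next
+//	}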
+
+// SignedURLOptions allows you to restrict the access to the signed URL.
+type SignedURLOptions struct {
+ // GoogleAccessID represents the authorizer of the signed URL generation.
+ // It is typically the Google service account client email address from
+ // the Google Developers Console in the form of "xxx@developer.gserviceaccount.com".
+ // Required.
+ GoogleAccessID string
+
+ // PrivateKey is the Google service account private key. It is obtainable
+ // from the Google Developers Console.
+ // At https://console.developers.google.com/project/<your-project-id>/apiui/credential,
+ // create a service account client ID or reuse one of your existing service account
+ // credentials. Click on the "Generate new P12 key" to generate and download
+ // a new private key. Once you download the P12 file, use the following command
+ // to convert it into a PEM file.
+ //
+ // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
+ //
+ // Provide the contents of the PEM file as a byte slice.
+ // Required.
+ PrivateKey []byte
+
+ // Method is the HTTP method to be used with the signed URL.
+ // Signed URLs can be used with GET, HEAD, PUT, and DELETE requests.
+ // Required.
+ Method string
+
+ // Expires is the expiration time on the signed URL. It must be
+ // a datetime in the future.
+ // Required.
+ Expires time.Time
+
+ // ContentType is the content type header the client must provide
+ // to use the generated signed URL.
+ // Optional.
+ ContentType string
+
+ // Headers is a list of extension headers the client must provide
+ // in order to use the generated signed URL.
+ // Optional.
+ Headers []string
+
+ // MD5 is the base64 encoded MD5 checksum of the file.
+ // If provided, the client should provide the exact value on the request
+ // header in order to use the signed URL.
+ // Optional.
+ MD5 []byte
+}
+
+// SignedURL returns a URL for the specified object. Signed URLs allow
+// users access to a restricted resource for a limited time without having a
+// Google account or signing in. For more information about the signed
+// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.
+func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
+ if opts == nil {
+ return "", errors.New("storage: missing required SignedURLOptions")
+ }
+ if opts.GoogleAccessID == "" || opts.PrivateKey == nil {
+ return "", errors.New("storage: missing required credentials to generate a signed URL")
+ }
+ if opts.Method == "" {
+ return "", errors.New("storage: missing required method option")
+ }
+ if opts.Expires.IsZero() {
+ return "", errors.New("storage: missing required expires option")
+ }
+ key, err := parseKey(opts.PrivateKey)
+ if err != nil {
+ return "", err
+ }
+ u := &url.URL{
+ Path: fmt.Sprintf("/%s/%s", bucket, name),
+ }
+ h := sha256.New()
+ fmt.Fprintf(h, "%s\n", opts.Method)
+ fmt.Fprintf(h, "%s\n", opts.MD5)
+ fmt.Fprintf(h, "%s\n", opts.ContentType)
+ fmt.Fprintf(h, "%d\n", opts.Expires.Unix())
+ fmt.Fprintf(h, "%s", strings.Join(opts.Headers, "\n"))
+ fmt.Fprintf(h, "%s", u.String())
+ b, err := rsa.SignPKCS1v15(
+ rand.Reader,
+ key,
+ crypto.SHA256,
+ h.Sum(nil),
+ )
+ if err != nil {
+ return "", err
+ }
+ encoded := base64.StdEncoding.EncodeToString(b)
+ u.Scheme = "https"
+ u.Host = "storage.googleapis.com"
+ q := u.Query()
+ q.Set("GoogleAccessId", opts.GoogleAccessID)
+ q.Set("Expires", fmt.Sprintf("%d", opts.Expires.Unix()))
+ q.Set("Signature", string(encoded))
+ u.RawQuery = q.Encode()
+ return u.String(), nil
+}
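+
+// Illustrative sketch (placeholder bucket, object, and credentials; pemBytes
+// would hold the PEM-encoded key described in SignedURLOptions.PrivateKey):
+//
+//	url, err := storage.SignedURL("my-bucket", "my-object", &storage.SignedURLOptions{
+//		GoogleAccessID: "xxx@developer.gserviceaccount.com",
+//		PrivateKey:     pemBytes,
+//		Method:         "GET",
+//		Expires:        time.Now().Add(48 * time.Hour),
+//	})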
+
+// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
+// Use BucketHandle.Object to get a handle.
+type ObjectHandle struct {
+ c *Client
+ bucket string
+ object string
+
+ acl *ACLHandle
+ conds []Condition
+}
+
+// ACL provides access to the object's access control list.
+// This controls who can read and write this object.
+// This call does not perform any network operations.
+func (o *ObjectHandle) ACL() *ACLHandle {
+ return o.acl
+}
+
+// WithConditions returns a copy of o using the provided conditions.
+func (o *ObjectHandle) WithConditions(conds ...Condition) *ObjectHandle {
+ o2 := *o
+ o2.conds = conds
+ return &o2
+}
+
+// Attrs returns meta information about the object.
+// ErrObjectNotExist will be returned if the object is not found.
+func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
+ if !utf8.ValidString(o.object) {
+ return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+ }
+ call := o.c.raw.Objects.Get(o.bucket, o.object).Projection("full").Context(ctx)
+ if err := applyConds("Attrs", o.conds, call); err != nil {
+ return nil, err
+ }
+ obj, err := call.Do()
+ if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+ return nil, ErrObjectNotExist
+ }
+ if err != nil {
+ return nil, err
+ }
+ return newObject(obj), nil
+}
+
+// Update updates an object with the provided attributes.
+// All zero-value attributes are ignored.
+// ErrObjectNotExist will be returned if the object is not found.
+func (o *ObjectHandle) Update(ctx context.Context, attrs ObjectAttrs) (*ObjectAttrs, error) {
+ if !utf8.ValidString(o.object) {
+ return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+ }
+ call := o.c.raw.Objects.Patch(o.bucket, o.object, attrs.toRawObject(o.bucket)).Projection("full").Context(ctx)
+ if err := applyConds("Update", o.conds, call); err != nil {
+ return nil, err
+ }
+ obj, err := call.Do()
+ if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
+ return nil, ErrObjectNotExist
+ }
+ if err != nil {
+ return nil, err
+ }
+ return newObject(obj), nil
+}
+
+// Delete deletes the single specified object.
+func (o *ObjectHandle) Delete(ctx context.Context) error {
+ if !utf8.ValidString(o.object) {
+ return fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+ }
+ call := o.c.raw.Objects.Delete(o.bucket, o.object).Context(ctx)
+ if err := applyConds("Delete", o.conds, call); err != nil {
+ return err
+ }
+ err := call.Do()
+ switch e := err.(type) {
+ case nil:
+ return nil
+ case *googleapi.Error:
+ if e.Code == http.StatusNotFound {
+ return ErrObjectNotExist
+ }
+ }
+ return err
+}
+
+// CopyTo copies the object to the given dst.
+// The copied object's attributes are overwritten by attrs if non-nil.
+func (o *ObjectHandle) CopyTo(ctx context.Context, dst *ObjectHandle, attrs *ObjectAttrs) (*ObjectAttrs, error) {
+ // TODO(djd): move bucket/object name validation to a single helper func.
+ if o.bucket == "" || dst.bucket == "" {
+ return nil, errors.New("storage: the source and destination bucket names must both be non-empty")
+ }
+ if o.object == "" || dst.object == "" {
+ return nil, errors.New("storage: the source and destination object names must both be non-empty")
+ }
+ if !utf8.ValidString(o.object) {
+ return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+ }
+ if !utf8.ValidString(dst.object) {
+ return nil, fmt.Errorf("storage: dst name %q is not valid UTF-8", dst.object)
+ }
+ var rawObject *raw.Object
+ if attrs != nil {
+ attrs.Name = dst.object
+ if attrs.ContentType == "" {
+ return nil, errors.New("storage: attrs.ContentType must be non-empty")
+ }
+ rawObject = attrs.toRawObject(dst.bucket)
+ }
+ call := o.c.raw.Objects.Copy(o.bucket, o.object, dst.bucket, dst.object, rawObject).Projection("full").Context(ctx)
+ if err := applyConds("CopyTo destination", dst.conds, call); err != nil {
+ return nil, err
+ }
+ if err := applyConds("CopyTo source", toSourceConds(o.conds), call); err != nil {
+ return nil, err
+ }
+ obj, err := call.Do()
+ if err != nil {
+ return nil, err
+ }
+ return newObject(obj), nil
+}
+
+// NewReader creates a new Reader to read the contents of the
+// object.
+// ErrObjectNotExist will be returned if the object is not found.
+func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
+ return o.NewRangeReader(ctx, 0, -1)
+}
+
+// NewRangeReader reads part of an object, reading at most length bytes
+// starting at the given offset. If length is negative, the object is read
+// until the end.
+func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) {
+ if !utf8.ValidString(o.object) {
+ return nil, fmt.Errorf("storage: object name %q is not valid UTF-8", o.object)
+ }
+ if offset < 0 {
+ return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
+ }
+ u := &url.URL{
+ Scheme: "https",
+ Host: "storage.googleapis.com",
+ Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
+ }
+ verb := "GET"
+ if length == 0 {
+ verb = "HEAD"
+ }
+ req, err := http.NewRequest(verb, u.String(), nil)
+ if err != nil {
+ return nil, err
+ }
+ if err := applyConds("NewReader", o.conds, objectsGetCall{req}); err != nil {
+ return nil, err
+ }
+ if length < 0 && offset > 0 {
+ req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
+ } else if length > 0 {
+ req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
+ }
+ res, err := o.c.hc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ if res.StatusCode == http.StatusNotFound {
+ res.Body.Close()
+ return nil, ErrObjectNotExist
+ }
+ if res.StatusCode < 200 || res.StatusCode > 299 {
+ res.Body.Close()
+ return nil, fmt.Errorf("storage: can't read object %v/%v, status code: %v", o.bucket, o.object, res.Status)
+ }
+ if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
+ res.Body.Close()
+ return nil, errors.New("storage: partial request not satisfied")
+ }
+ clHeader := res.Header.Get("X-Goog-Stored-Content-Length")
+ cl, err := strconv.ParseInt(clHeader, 10, 64)
+ if err != nil {
+ return nil, fmt.Errorf("storage: can't parse content length %q: %v", clHeader, err)
+ }
+ remain := res.ContentLength
+ body := res.Body
+ if length == 0 {
+ remain = 0
+ body.Close()
+ body = emptyBody
+ }
+ return &Reader{
+ body: body,
+ size: cl,
+ remain: remain,
+ contentType: res.Header.Get("Content-Type"),
+ }, nil
+}
+
+var emptyBody = ioutil.NopCloser(strings.NewReader(""))
+
+// NewWriter returns a storage Writer that writes to the GCS object
+// associated with this ObjectHandle.
+//
+// A new object will be created unless an object with this name already exists.
+// Otherwise any previous object with the same name will be replaced.
+// The object will not be available (and any previous object will remain)
+// until Close has been called.
+//
+// Attributes can be set on the object by modifying the returned Writer's
+// ObjectAttrs field before the first call to Write. If no ContentType
+// attribute is specified, the content type will be automatically sniffed
+// using net/http.DetectContentType.
+//
+// It is the caller's responsibility to call Close when writing is done.
+func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
+ return &Writer{
+ ctx: ctx,
+ o: o,
+ donec: make(chan struct{}),
+ ObjectAttrs: ObjectAttrs{Name: o.object},
+ }
+}
+
+// parseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from the PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func parseKey(key []byte) (*rsa.PrivateKey, error) {
+ if block, _ := pem.Decode(key); block != nil {
+ key = block.Bytes
+ }
+ parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+ if err != nil {
+ parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+ if err != nil {
+ return nil, err
+ }
+ }
+ parsed, ok := parsedKey.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("oauth2: private key is invalid")
+ }
+ return parsed, nil
+}
+
+// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
+type BucketAttrs struct {
+ // Name is the name of the bucket.
+ Name string
+
+ // ACL is the list of access control rules on the bucket.
+ ACL []ACLRule
+
+ // DefaultObjectACL is the list of access controls to
+ // apply to new objects when no object ACL is provided.
+ DefaultObjectACL []ACLRule
+
+ // Location is the location of the bucket. It defaults to "US".
+ Location string
+
+ // MetaGeneration is the metadata generation of the bucket.
+ MetaGeneration int64
+
+ // StorageClass is the storage class of the bucket. This defines
+ // how objects in the bucket are stored and determines the SLA
+ // and the cost of storage. Typical values are "STANDARD" and
+ // "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD".
+ StorageClass string
+
+ // Created is the creation time of the bucket.
+ Created time.Time
+}
+
+func newBucket(b *raw.Bucket) *BucketAttrs {
+ if b == nil {
+ return nil
+ }
+ bucket := &BucketAttrs{
+ Name: b.Name,
+ Location: b.Location,
+ MetaGeneration: b.Metageneration,
+ StorageClass: b.StorageClass,
+ Created: convertTime(b.TimeCreated),
+ }
+ acl := make([]ACLRule, len(b.Acl))
+ for i, rule := range b.Acl {
+ acl[i] = ACLRule{
+ Entity: ACLEntity(rule.Entity),
+ Role: ACLRole(rule.Role),
+ }
+ }
+ bucket.ACL = acl
+ objACL := make([]ACLRule, len(b.DefaultObjectAcl))
+ for i, rule := range b.DefaultObjectAcl {
+ objACL[i] = ACLRule{
+ Entity: ACLEntity(rule.Entity),
+ Role: ACLRole(rule.Role),
+ }
+ }
+ bucket.DefaultObjectACL = objACL
+ return bucket
+}
+
+func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl {
+ var acl []*raw.ObjectAccessControl
+ if len(oldACL) > 0 {
+ acl = make([]*raw.ObjectAccessControl, len(oldACL))
+ for i, rule := range oldACL {
+ acl[i] = &raw.ObjectAccessControl{
+ Entity: string(rule.Entity),
+ Role: string(rule.Role),
+ }
+ }
+ }
+ return acl
+}
+
+// toRawBucket copies the editable attributes from b to the raw library's Bucket type.
+func (b *BucketAttrs) toRawBucket() *raw.Bucket {
+ var acl []*raw.BucketAccessControl
+ if len(b.ACL) > 0 {
+ acl = make([]*raw.BucketAccessControl, len(b.ACL))
+ for i, rule := range b.ACL {
+ acl[i] = &raw.BucketAccessControl{
+ Entity: string(rule.Entity),
+ Role: string(rule.Role),
+ }
+ }
+ }
+ dACL := toRawObjectACL(b.DefaultObjectACL)
+ return &raw.Bucket{
+ Name: b.Name,
+ DefaultObjectAcl: dACL,
+ Location: b.Location,
+ StorageClass: b.StorageClass,
+ Acl: acl,
+ }
+}
+
+// toRawObject copies the editable attributes from o to the raw library's Object type.
+func (o ObjectAttrs) toRawObject(bucket string) *raw.Object {
+ acl := toRawObjectACL(o.ACL)
+ return &raw.Object{
+ Bucket: bucket,
+ Name: o.Name,
+ ContentType: o.ContentType,
+ ContentEncoding: o.ContentEncoding,
+ ContentLanguage: o.ContentLanguage,
+ CacheControl: o.CacheControl,
+ ContentDisposition: o.ContentDisposition,
+ Acl: acl,
+ Metadata: o.Metadata,
+ }
+}
+
+// ObjectAttrs represents the metadata for a Google Cloud Storage (GCS) object.
+type ObjectAttrs struct {
+ // Bucket is the name of the bucket containing this GCS object.
+ // This field is read-only.
+ Bucket string
+
+ // Name is the name of the object within the bucket.
+ // This field is read-only.
+ Name string
+
+ // ContentType is the MIME type of the object's content.
+ ContentType string
+
+ // ContentLanguage is the content language of the object's content.
+ ContentLanguage string
+
+ // CacheControl is the Cache-Control header to be sent in the response
+ // headers when serving the object data.
+ CacheControl string
+
+ // ACL is the list of access control rules for the object.
+ ACL []ACLRule
+
+ // Owner is the owner of the object. This field is read-only.
+ //
+ // If non-zero, it is in the form of "user-<userId>".
+ Owner string
+
+ // Size is the length of the object's content. This field is read-only.
+ Size int64
+
+ // ContentEncoding is the encoding of the object's content.
+ ContentEncoding string
+
+ // ContentDisposition is the optional Content-Disposition header of the object
+ // sent in the response headers.
+ ContentDisposition string
+
+ // MD5 is the MD5 hash of the object's content. This field is read-only.
+ MD5 []byte
+
+ // CRC32C is the CRC32 checksum of the object's content using
+ // the Castagnoli93 polynomial. This field is read-only.
+ CRC32C uint32
+
+	// MediaLink is a URL to the object's content. This field is read-only.
+ MediaLink string
+
+ // Metadata represents user-provided metadata, in key/value pairs.
+ // It can be nil if no metadata is provided.
+ Metadata map[string]string
+
+ // Generation is the generation number of the object's content.
+ // This field is read-only.
+ Generation int64
+
+ // MetaGeneration is the version of the metadata for this
+ // object at this generation. This field is used for preconditions
+ // and for detecting changes in metadata. A metageneration number
+ // is only meaningful in the context of a particular generation
+ // of a particular object. This field is read-only.
+ MetaGeneration int64
+
+	// StorageClass is the storage class of the object. This value
+	// defines how the object is stored and determines the SLA and
+	// the cost of storage. Typical values are "STANDARD" and
+	// "DURABLE_REDUCED_AVAILABILITY". It defaults to "STANDARD".
+	// This field is read-only.
+ StorageClass string
+
+ // Created is the time the object was created. This field is read-only.
+ Created time.Time
+
+ // Deleted is the time the object was deleted.
+ // If not deleted, it is the zero value. This field is read-only.
+ Deleted time.Time
+
+ // Updated is the creation or modification time of the object.
+ // For buckets with versioning enabled, changing an object's
+ // metadata does not change this property. This field is read-only.
+ Updated time.Time
+}
+
+// convertTime converts a time in RFC3339 format to time.Time.
+// If any error occurs in parsing, the zero-value time.Time is silently returned.
+func convertTime(t string) time.Time {
+ var r time.Time
+ if t != "" {
+ r, _ = time.Parse(time.RFC3339, t)
+ }
+ return r
+}
+
+func newObject(o *raw.Object) *ObjectAttrs {
+ if o == nil {
+ return nil
+ }
+ acl := make([]ACLRule, len(o.Acl))
+ for i, rule := range o.Acl {
+ acl[i] = ACLRule{
+ Entity: ACLEntity(rule.Entity),
+ Role: ACLRole(rule.Role),
+ }
+ }
+ owner := ""
+ if o.Owner != nil {
+ owner = o.Owner.Entity
+ }
+ md5, _ := base64.StdEncoding.DecodeString(o.Md5Hash)
+ var crc32c uint32
+ d, err := base64.StdEncoding.DecodeString(o.Crc32c)
+ if err == nil && len(d) == 4 {
+ crc32c = uint32(d[0])<<24 + uint32(d[1])<<16 + uint32(d[2])<<8 + uint32(d[3])
+ }
+ return &ObjectAttrs{
+ Bucket: o.Bucket,
+ Name: o.Name,
+ ContentType: o.ContentType,
+ ContentLanguage: o.ContentLanguage,
+ CacheControl: o.CacheControl,
+ ACL: acl,
+ Owner: owner,
+ ContentEncoding: o.ContentEncoding,
+ Size: int64(o.Size),
+ MD5: md5,
+ CRC32C: crc32c,
+ MediaLink: o.MediaLink,
+ Metadata: o.Metadata,
+ Generation: o.Generation,
+ MetaGeneration: o.Metageneration,
+ StorageClass: o.StorageClass,
+ Created: convertTime(o.TimeCreated),
+ Deleted: convertTime(o.TimeDeleted),
+ Updated: convertTime(o.Updated),
+ }
+}
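+
+// Editorial note: the JSON API carries Crc32c as the base64 encoding of the
+// checksum's four big-endian bytes; the decoding in newObject reverses the
+// sketch below (hash/crc32 with the Castagnoli table).
+//
+//	sum := crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli))
+//	wire := base64.StdEncoding.EncodeToString([]byte{
+//		byte(sum >> 24), byte(sum >> 16), byte(sum >> 8), byte(sum),
+//	})
+//	// wire corresponds to o.Crc32c for the same content.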
+
+// Query represents a query to filter objects from a bucket.
+type Query struct {
+ // Delimiter returns results in a directory-like fashion.
+ // Results will contain only objects whose names, aside from the
+ // prefix, do not contain delimiter. Objects whose names,
+ // aside from the prefix, contain delimiter will have their name,
+ // truncated after the delimiter, returned in prefixes.
+ // Duplicate prefixes are omitted.
+ // Optional.
+ Delimiter string
+
+ // Prefix is the prefix filter to query objects
+ // whose names begin with this prefix.
+ // Optional.
+ Prefix string
+
+ // Versions indicates whether multiple versions of the same
+ // object will be included in the results.
+ Versions bool
+
+ // Cursor is a previously-returned page token
+ // representing part of the larger set of results to view.
+ // Optional.
+ Cursor string
+
+ // MaxResults is the maximum number of items plus prefixes
+ // to return. As duplicate prefixes are omitted,
+ // fewer total results may be returned than requested.
+ // The default page limit is used if it is negative or zero.
+ MaxResults int
+}
+
+// ObjectList represents a list of objects returned from a bucket List call.
+type ObjectList struct {
+	// Results is the list of objects returned by the query.
+ Results []*ObjectAttrs
+
+ // Next is the continuation query to retrieve more
+ // results with the same filtering criteria. If there
+ // are no more results to retrieve, it is nil.
+ Next *Query
+
+ // Prefixes represents prefixes of objects
+ // matching-but-not-listed up to and including
+ // the requested delimiter.
+ Prefixes []string
+}
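+
+// Editorial sketch: a directory-style listing that combines Prefix, Delimiter
+// and the Next query for pagination. It assumes this vintage of the package
+// exposes a bucket List call taking a *Query and returning an *ObjectList;
+// the client, bucket name and prefix are hypothetical.
+//
+//	q := &Query{Prefix: "photos/2015/", Delimiter: "/"}
+//	for q != nil {
+//		list, err := client.Bucket("my-bucket").List(ctx, q)
+//		if err != nil {
+//			return err
+//		}
+//		for _, obj := range list.Results {
+//			fmt.Println(obj.Name, obj.Size)
+//		}
+//		for _, p := range list.Prefixes {
+//			fmt.Println("prefix:", p)
+//		}
+//		q = list.Next // nil once all results have been returned
+//	}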
+
+// contentTyper implements ContentTyper to enable an
+// io.Reader to specify its MIME type.
+type contentTyper struct {
+ io.Reader
+ t string
+}
+
+func (c *contentTyper) ContentType() string {
+ return c.t
+}
+
+// A Condition constrains methods to act on specific generations of
+// resources.
+//
+// Not all conditions or combinations of conditions are applicable to
+// all methods.
+type Condition interface {
+ // method is the high-level ObjectHandle method name, for
+ // error messages. call is the call object to modify.
+ modifyCall(method string, call interface{}) error
+}
+
+// applyConds modifies the provided call using the conditions in conds.
+// call is something that quacks like a *raw.WhateverCall.
+func applyConds(method string, conds []Condition, call interface{}) error {
+ for _, cond := range conds {
+ if err := cond.modifyCall(method, call); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// toSourceConds returns a slice of Conditions derived from conds that instead
+// function on the equivalent Source methods of a call.
+func toSourceConds(conds []Condition) []Condition {
+ out := make([]Condition, 0, len(conds))
+ for _, c := range conds {
+ switch c := c.(type) {
+ case genCond:
+ var m string
+ if strings.HasPrefix(c.method, "If") {
+ m = "IfSource" + c.method[2:]
+ } else {
+ m = "Source" + c.method
+ }
+ out = append(out, genCond{method: m, val: c.val})
+ default:
+ // NOTE(djd): If the message from unsupportedCond becomes
+ // confusing, we'll need to find a way for Conditions to
+ // identify themselves.
+ out = append(out, unsupportedCond{})
+ }
+ }
+ return out
+}
+
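+// Generation returns a Condition that selects a specific generation of an
+// object. The If* variants below are preconditions: the call proceeds only if
+// the object's generation (or metageneration) matches, or does not match, the
+// given value.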
+func Generation(gen int64) Condition { return genCond{"Generation", gen} }
+func IfGenerationMatch(gen int64) Condition { return genCond{"IfGenerationMatch", gen} }
+func IfGenerationNotMatch(gen int64) Condition { return genCond{"IfGenerationNotMatch", gen} }
+func IfMetaGenerationMatch(gen int64) Condition { return genCond{"IfMetagenerationMatch", gen} }
+func IfMetaGenerationNotMatch(gen int64) Condition { return genCond{"IfMetagenerationNotMatch", gen} }
+
+type genCond struct {
+ method string
+ val int64
+}
+
+func (g genCond) modifyCall(srcMethod string, call interface{}) error {
+ rv := reflect.ValueOf(call)
+ meth := rv.MethodByName(g.method)
+ if !meth.IsValid() {
+ return fmt.Errorf("%s: condition %s not supported", srcMethod, g.method)
+ }
+ meth.Call([]reflect.Value{reflect.ValueOf(g.val)})
+ return nil
+}
+
+type unsupportedCond struct{}
+
+func (unsupportedCond) modifyCall(srcMethod string, call interface{}) error {
+ return fmt.Errorf("%s: condition not supported", srcMethod)
+}
+
+func appendParam(req *http.Request, k, v string) {
+ sep := ""
+ if req.URL.RawQuery != "" {
+ sep = "&"
+ }
+ req.URL.RawQuery += sep + url.QueryEscape(k) + "=" + url.QueryEscape(v)
+}
+
+// objectsGetCall wraps an *http.Request for an object fetch call, but adds the
+// methods that modifyCall searches for by name (the same names as in the raw,
+// auto-generated API).
+type objectsGetCall struct{ req *http.Request }
+
+func (c objectsGetCall) Generation(gen int64) {
+ appendParam(c.req, "generation", fmt.Sprint(gen))
+}
+func (c objectsGetCall) IfGenerationMatch(gen int64) {
+ appendParam(c.req, "ifGenerationMatch", fmt.Sprint(gen))
+}
+func (c objectsGetCall) IfGenerationNotMatch(gen int64) {
+ appendParam(c.req, "ifGenerationNotMatch", fmt.Sprint(gen))
+}
+func (c objectsGetCall) IfMetagenerationMatch(gen int64) {
+ appendParam(c.req, "ifMetagenerationMatch", fmt.Sprint(gen))
+}
+func (c objectsGetCall) IfMetagenerationNotMatch(gen int64) {
+ appendParam(c.req, "ifMetagenerationNotMatch", fmt.Sprint(gen))
+}
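+
+// Editorial sketch: how a Condition reaches a call object. modifyCall looks up
+// a method by the Condition's name via reflection, so the same genCond works
+// against both the generated raw calls and the objectsGetCall wrapper above.
+// The request URL is hypothetical.
+//
+//	req, _ := http.NewRequest("GET", "https://storage.googleapis.com/b/o", nil)
+//	cond := IfGenerationMatch(1234)
+//	if err := applyConds("NewReader", []Condition{cond}, objectsGetCall{req}); err != nil {
+//		// handle error
+//	}
+//	// req.URL.RawQuery now includes "ifGenerationMatch=1234".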
diff --git a/vendor/google.golang.org/cloud/storage/writer.go b/vendor/google.golang.org/cloud/storage/writer.go
new file mode 100644
index 0000000..60937c0
--- /dev/null
+++ b/vendor/google.golang.org/cloud/storage/writer.go
@@ -0,0 +1,129 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package storage
+
+import (
+ "fmt"
+ "io"
+ "unicode/utf8"
+
+ "golang.org/x/net/context"
+ "google.golang.org/api/googleapi"
+ raw "google.golang.org/api/storage/v1"
+)
+
+// A Writer writes a Cloud Storage object.
+type Writer struct {
+ // ObjectAttrs are optional attributes to set on the object. Any attributes
+ // must be initialized before the first Write call. Nil or zero-valued
+ // attributes are ignored.
+ ObjectAttrs
+
+ ctx context.Context
+ o *ObjectHandle
+
+ opened bool
+ pw *io.PipeWriter
+
+ donec chan struct{} // closed after err and obj are set.
+ err error
+ obj *ObjectAttrs
+}
+
+func (w *Writer) open() error {
+ attrs := w.ObjectAttrs
+ // Check the developer didn't change the object Name (this is unfortunate, but
+ // we don't want to store an object under the wrong name).
+ if attrs.Name != w.o.object {
+ return fmt.Errorf("storage: Writer.Name %q does not match object name %q", attrs.Name, w.o.object)
+ }
+ if !utf8.ValidString(attrs.Name) {
+ return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
+ }
+ pr, pw := io.Pipe()
+ w.pw = pw
+ w.opened = true
+
+ var mediaOpts []googleapi.MediaOption
+ if c := attrs.ContentType; c != "" {
+ mediaOpts = append(mediaOpts, googleapi.ContentType(c))
+ }
+
+ go func() {
+ defer close(w.donec)
+
+ call := w.o.c.raw.Objects.Insert(w.o.bucket, attrs.toRawObject(w.o.bucket)).
+ Media(pr, mediaOpts...).
+ Projection("full").
+ Context(w.ctx)
+
+ var resp *raw.Object
+ err := applyConds("NewWriter", w.o.conds, call)
+ if err == nil {
+ resp, err = call.Do()
+ }
+ if err != nil {
+ w.err = err
+ pr.CloseWithError(w.err)
+ return
+ }
+ w.obj = newObject(resp)
+ }()
+ return nil
+}
+
+// Write appends to w.
+func (w *Writer) Write(p []byte) (n int, err error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ if !w.opened {
+ if err := w.open(); err != nil {
+ return 0, err
+ }
+ }
+ return w.pw.Write(p)
+}
+
+// Close completes the write operation and flushes any buffered data.
+// If Close doesn't return an error, metadata about the written object
+// can be retrieved by calling Attrs.
+func (w *Writer) Close() error {
+ if !w.opened {
+ if err := w.open(); err != nil {
+ return err
+ }
+ }
+ if err := w.pw.Close(); err != nil {
+ return err
+ }
+ <-w.donec
+ return w.err
+}
+
+// CloseWithError aborts the write operation with the provided error.
+// CloseWithError always returns nil.
+func (w *Writer) CloseWithError(err error) error {
+ if !w.opened {
+ return nil
+ }
+ return w.pw.CloseWithError(err)
+}
+
+// Attrs returns metadata about a successfully-written object.
+// It's only valid to call it after Close returns nil.
+func (w *Writer) Attrs() *ObjectAttrs {
+ return w.obj
+}
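+
+// Editorial sketch: the full Writer lifecycle. The upload starts lazily on the
+// first Write (or Close), Attrs is meaningful only after Close returns nil,
+// and CloseWithError abandons the upload. The handle obj and reader src are
+// assumptions.
+//
+//	w := obj.NewWriter(ctx)
+//	if _, err := io.Copy(w, src); err != nil {
+//		w.CloseWithError(err) // abort the upload
+//		return err
+//	}
+//	if err := w.Close(); err != nil {
+//		return err
+//	}
+//	attrs := w.Attrs() // e.g. attrs.Size, attrs.MD5, attrs.MediaLink
+//	_ = attrs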