author    Niall Sheridan <nsheridan@gmail.com>      2017-10-18 13:15:14 +0100
committer Niall Sheridan <niall@intercom.io>        2017-10-18 13:25:46 +0100
commit    7b320119ba532fd409ec7dade7ad02011c309599 (patch)
tree      a39860f35b55e6cc499f8f5bfa969138c5dd6b73 /vendor/github.com
parent    7c99874c7a3e7a89716f3ee0cdf696532e35ae35 (diff)
Update dependencies
Diffstat (limited to 'vendor/github.com')
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/client/client.go | 62
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go | 10
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/client/logger.go | 108
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/config.go | 13
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go | 6
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/convert_types.go | 18
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go | 129
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go | 14
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go | 31
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go | 1
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go | 33
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go | 2
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go | 46
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go | 27
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/doc.go | 56
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go | 341
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go | 4
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go | 112
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go | 17
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go | 3
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/logger.go | 4
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go | 19
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go | 31
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/request.go | 80
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go | 22
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go | 30
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go | 92
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/validation.go | 2
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go | 32
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/session/doc.go | 5
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go | 43
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/session/session.go | 30
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go | 2
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go | 85
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/url.go | 12
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go | 29
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/aws/version.go | 2
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go | 40
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go | 4
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go | 1
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go | 7
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go | 13
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/api.go | 2106
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go | 65
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go | 18
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/doc.go | 26
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go | 109
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go | 13
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/service.go | 10
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/sse.go | 18
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go | 8
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/sts/api.go | 140
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/sts/doc.go | 72
-rw-r--r--  vendor/github.com/aws/aws-sdk-go/service/sts/service.go | 52
-rw-r--r--  vendor/github.com/davecgh/go-spew/LICENSE | 2
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/bypass.go | 6
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/common.go | 2
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/dump.go | 10
-rw-r--r--  vendor/github.com/davecgh/go-spew/spew/format.go | 4
-rw-r--r--  vendor/github.com/go-ini/ini/LICENSE | 2
-rw-r--r--  vendor/github.com/go-ini/ini/README.md | 37
-rw-r--r--  vendor/github.com/go-ini/ini/README_ZH.md | 37
-rw-r--r--  vendor/github.com/go-ini/ini/ini.go | 78
-rw-r--r--  vendor/github.com/go-ini/ini/key.go | 37
-rw-r--r--  vendor/github.com/go-ini/ini/parser.go | 21
-rw-r--r--  vendor/github.com/go-ini/ini/struct.go | 104
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/AUTHORS | 16
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/README.md | 65
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/connection.go | 114
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/connection_go18.go | 197
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/const.go | 9
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/driver.go | 14
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/dsn.go | 119
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/errors.go | 79
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/fields.go | 140
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/infile.go | 3
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/packets.go | 134
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/rows.go | 111
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/statement.go | 27
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/transaction.go | 4
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/utils.go | 82
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/utils_go17.go | 40
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/utils_go18.go | 49
-rw-r--r--  vendor/github.com/go-sql-driver/mysql/utils_legacy.go | 18
-rw-r--r--  vendor/github.com/golang/protobuf/proto/encode.go | 4
-rw-r--r--  vendor/github.com/golang/protobuf/proto/lib.go | 1
-rw-r--r--  vendor/github.com/golang/protobuf/proto/text_parser.go | 2
-rw-r--r--  vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile | 8
-rw-r--r--  vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go | 516
-rw-r--r--  vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto | 849
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/any.go | 3
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/any/any.pb.go | 59
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/any/any.proto | 11
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go | 72
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/duration/duration.proto | 23
-rwxr-xr-x  vendor/github.com/golang/protobuf/ptypes/regen.sh | 37
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/timestamp.go | 9
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go | 85
-rw-r--r--  vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto | 34
-rw-r--r--  vendor/github.com/golang/snappy/AUTHORS | 15
-rw-r--r--  vendor/github.com/golang/snappy/CONTRIBUTORS | 37
-rw-r--r--  vendor/github.com/golang/snappy/LICENSE | 27
-rw-r--r--  vendor/github.com/golang/snappy/README | 107
-rw-r--r--  vendor/github.com/golang/snappy/decode.go | 237
-rw-r--r--  vendor/github.com/golang/snappy/decode_amd64.go | 14
-rw-r--r--  vendor/github.com/golang/snappy/decode_amd64.s | 490
-rw-r--r--  vendor/github.com/golang/snappy/encode.go | 285
-rw-r--r--  vendor/github.com/golang/snappy/encode_amd64.go | 29
-rw-r--r--  vendor/github.com/golang/snappy/encode_amd64.s | 730
-rw-r--r--  vendor/github.com/golang/snappy/snappy.go | 87
-rw-r--r--  vendor/github.com/google/go-github/github/activity_events.go | 12
-rw-r--r--  vendor/github.com/google/go-github/github/apps.go (renamed from vendor/github.com/google/go-github/github/integration.go) | 14
-rw-r--r--  vendor/github.com/google/go-github/github/apps_installation.go | 84
-rw-r--r--  vendor/github.com/google/go-github/github/authorizations.go | 11
-rw-r--r--  vendor/github.com/google/go-github/github/doc.go | 39
-rw-r--r--  vendor/github.com/google/go-github/github/event_types.go | 82
-rw-r--r--  vendor/github.com/google/go-github/github/gists.go | 7
-rw-r--r--  vendor/github.com/google/go-github/github/git_refs.go | 58
-rw-r--r--  vendor/github.com/google/go-github/github/git_trees.go | 1
-rw-r--r--  vendor/github.com/google/go-github/github/github-accessors.go | 244
-rw-r--r--  vendor/github.com/google/go-github/github/github.go | 77
-rw-r--r--  vendor/github.com/google/go-github/github/integration_installation.go | 49
-rw-r--r--  vendor/github.com/google/go-github/github/issues.go | 14
-rw-r--r--  vendor/github.com/google/go-github/github/issues_events.go | 2
-rw-r--r--  vendor/github.com/google/go-github/github/issues_labels.go | 2
-rw-r--r--  vendor/github.com/google/go-github/github/messages.go | 116
-rw-r--r--  vendor/github.com/google/go-github/github/misc.go | 55
-rw-r--r--  vendor/github.com/google/go-github/github/orgs.go | 19
-rw-r--r--  vendor/github.com/google/go-github/github/orgs_members.go | 2
-rw-r--r--  vendor/github.com/google/go-github/github/orgs_teams.go | 3
-rw-r--r--  vendor/github.com/google/go-github/github/orgs_users_blocking.go | 91
-rw-r--r--  vendor/github.com/google/go-github/github/projects.go | 17
-rw-r--r--  vendor/github.com/google/go-github/github/pulls.go | 7
-rw-r--r--  vendor/github.com/google/go-github/github/pulls_reviewers.go | 58
-rw-r--r--  vendor/github.com/google/go-github/github/pulls_reviews.go | 36
-rw-r--r--  vendor/github.com/google/go-github/github/repos.go | 345
-rw-r--r--  vendor/github.com/google/go-github/github/repos_collaborators.go | 23
-rw-r--r--  vendor/github.com/google/go-github/github/repos_commits.go | 9
-rw-r--r--  vendor/github.com/google/go-github/github/repos_community_health.go | 57
-rw-r--r--  vendor/github.com/google/go-github/github/repos_contents.go | 4
-rw-r--r--  vendor/github.com/google/go-github/github/repos_deployments.go | 4
-rw-r--r--  vendor/github.com/google/go-github/github/repos_forks.go | 3
-rw-r--r--  vendor/github.com/google/go-github/github/repos_invitations.go | 6
-rw-r--r--  vendor/github.com/google/go-github/github/repos_pages.go | 7
-rw-r--r--  vendor/github.com/google/go-github/github/repos_releases.go | 2
-rw-r--r--  vendor/github.com/google/go-github/github/search.go | 12
-rw-r--r--  vendor/github.com/google/go-github/github/users.go | 9
-rw-r--r--  vendor/github.com/google/go-github/github/users_gpg_keys.go | 2
-rw-r--r--  vendor/github.com/google/go-github/github/without_appengine.go | 4
-rw-r--r--  vendor/github.com/googleapis/gax-go/call_option.go | 10
-rw-r--r--  vendor/github.com/googleapis/gax-go/path_template.go | 176
-rw-r--r--  vendor/github.com/googleapis/gax-go/path_template_parser.go | 227
-rw-r--r--  vendor/github.com/gorilla/csrf/Gopkg.lock | 27
-rw-r--r--  vendor/github.com/gorilla/csrf/Gopkg.toml | 34
-rw-r--r--  vendor/github.com/gorilla/csrf/doc.go | 228
-rw-r--r--  vendor/github.com/gorilla/handlers/handlers.go | 8
-rw-r--r--  vendor/github.com/gorilla/mux/README.md | 71
-rw-r--r--  vendor/github.com/gorilla/mux/doc.go | 12
-rw-r--r--  vendor/github.com/gorilla/mux/mux.go | 48
-rw-r--r--  vendor/github.com/gorilla/mux/regexp.go | 5
-rw-r--r--  vendor/github.com/gorilla/mux/route.go | 91
-rw-r--r--  vendor/github.com/gorilla/sessions/README.md | 6
-rw-r--r--  vendor/github.com/gorilla/sessions/doc.go | 3
-rw-r--r--  vendor/github.com/hashicorp/go-multierror/multierror.go | 4
-rw-r--r--  vendor/github.com/hashicorp/hcl/appveyor.yml | 2
-rw-r--r--  vendor/github.com/hashicorp/hcl/decoder.go | 37
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/parser/parser.go | 20
-rw-r--r--  vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go | 2
-rw-r--r--  vendor/github.com/hashicorp/hcl/json/parser/parser.go | 2
-rw-r--r--  vendor/github.com/hashicorp/hcl/json/scanner/scanner.go | 2
-rw-r--r--  vendor/github.com/hashicorp/vault/api/auth_token.go | 20
-rw-r--r--  vendor/github.com/hashicorp/vault/api/client.go | 78
-rw-r--r--  vendor/github.com/hashicorp/vault/api/renewer.go | 302
-rw-r--r--  vendor/github.com/hashicorp/vault/api/request.go | 9
-rw-r--r--  vendor/github.com/hashicorp/vault/api/response.go | 5
-rw-r--r--  vendor/github.com/hashicorp/vault/api/secret.go | 1
-rw-r--r--  vendor/github.com/hashicorp/vault/api/ssh.go | 17
-rw-r--r--  vendor/github.com/hashicorp/vault/api/sys_auth.go | 18
-rw-r--r--  vendor/github.com/hashicorp/vault/api/sys_config_cors.go | 56
-rw-r--r--  vendor/github.com/hashicorp/vault/api/sys_health.go | 29
-rw-r--r--  vendor/github.com/hashicorp/vault/api/sys_leader.go | 7
-rw-r--r--  vendor/github.com/hashicorp/vault/api/sys_leases.go (renamed from vendor/github.com/hashicorp/vault/api/sys_lease.go) | 8
-rw-r--r--  vendor/github.com/hashicorp/vault/api/sys_mounts.go | 10
-rw-r--r--  vendor/github.com/hashicorp/vault/helper/compressutil/compress.go | 40
-rw-r--r--  vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go | 65
-rw-r--r--  vendor/github.com/jmoiron/sqlx/README.md | 2
-rw-r--r--  vendor/github.com/jmoiron/sqlx/sqlx_context.go | 6
-rw-r--r--  vendor/github.com/magiconair/properties/CHANGELOG.md | 5
-rw-r--r--  vendor/github.com/magiconair/properties/README.md | 2
-rw-r--r--  vendor/github.com/magiconair/properties/properties.go | 3
-rw-r--r--  vendor/github.com/mattn/go-sqlite3/README.md | 12
-rw-r--r--  vendor/github.com/mattn/go-sqlite3/callback.go | 24
-rw-r--r--  vendor/github.com/mattn/go-sqlite3/sqlite3.go | 177
-rw-r--r--  vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go | 1
-rw-r--r--  vendor/github.com/mattn/go-sqlite3/sqlite3_other.go | 1
-rw-r--r--  vendor/github.com/mitchellh/mapstructure/decode_hooks.go | 14
-rw-r--r--  vendor/github.com/mitchellh/mapstructure/mapstructure.go | 33
-rw-r--r--  vendor/github.com/pelletier/go-toml/README.md | 85
-rw-r--r--  vendor/github.com/pelletier/go-toml/benchmark.json | 164
-rwxr-xr-x  vendor/github.com/pelletier/go-toml/benchmark.sh | 32
-rw-r--r--  vendor/github.com/pelletier/go-toml/benchmark.toml | 244
-rw-r--r--  vendor/github.com/pelletier/go-toml/benchmark.yml | 121
-rwxr-xr-x  vendor/github.com/pelletier/go-toml/clean.sh | 6
-rw-r--r--  vendor/github.com/pelletier/go-toml/doc.go | 251
-rw-r--r--  vendor/github.com/pelletier/go-toml/lexer.go | 88
-rw-r--r--  vendor/github.com/pelletier/go-toml/marshal.go | 125
-rw-r--r--  vendor/github.com/pelletier/go-toml/match.go | 234
-rw-r--r--  vendor/github.com/pelletier/go-toml/parser.go | 72
-rw-r--r--  vendor/github.com/pelletier/go-toml/query.go | 153
-rw-r--r--  vendor/github.com/pelletier/go-toml/querylexer.go | 356
-rw-r--r--  vendor/github.com/pelletier/go-toml/queryparser.go | 275
-rwxr-xr-x  vendor/github.com/pelletier/go-toml/test.sh | 10
-rw-r--r--  vendor/github.com/pelletier/go-toml/toml.go | 144
-rw-r--r--  vendor/github.com/pelletier/go-toml/tomltree_create.go | 31
-rw-r--r--  vendor/github.com/pelletier/go-toml/tomltree_write.go | 145
-rw-r--r--  vendor/github.com/pkg/browser/browser_openbsd.go | 4
-rw-r--r--  vendor/github.com/pkg/errors/stack.go | 8
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/counter.go | 59
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/desc.go | 25
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/doc.go | 13
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/gauge.go | 56
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/go_collector.go | 10
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/histogram.go | 73
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/http.go | 18
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/labels.go | 57
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/observer.go | 50
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go | 199
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go | 181
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go | 44
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go | 31
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go | 98
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go | 144
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go | 440
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/registry.go | 11
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/summary.go | 84
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/timer.go | 29
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/untyped.go | 107
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/value.go | 7
-rw-r--r--  vendor/github.com/prometheus/client_golang/prometheus/vec.go | 143
-rw-r--r--  vendor/github.com/prometheus/common/expfmt/text_parse.go | 4
-rw-r--r--  vendor/github.com/prometheus/common/model/time.go | 14
-rw-r--r--  vendor/github.com/prometheus/procfs/Makefile | 16
-rw-r--r--  vendor/github.com/prometheus/procfs/ipvs.go | 46
-rw-r--r--  vendor/github.com/prometheus/procfs/mountstats.go | 12
-rw-r--r--  vendor/github.com/prometheus/procfs/proc_limits.go | 38
-rw-r--r--  vendor/github.com/prometheus/procfs/stat.go | 193
-rwxr-xr-x  vendor/github.com/prometheus/procfs/ttar | 264
-rw-r--r--  vendor/github.com/prometheus/procfs/xfrm.go | 187
-rw-r--r--  vendor/github.com/prometheus/procfs/xfs/parse.go | 2
-rw-r--r--  vendor/github.com/prometheus/procfs/xfs/xfs.go | 5
-rw-r--r--  vendor/github.com/sethgrid/pester/README.md | 2
-rw-r--r--  vendor/github.com/sethgrid/pester/main.go | 4
-rw-r--r--  vendor/github.com/soheilhy/cmux/README.md | 2
-rw-r--r--  vendor/github.com/soheilhy/cmux/matchers.go | 102
-rw-r--r--  vendor/github.com/spf13/afero/README.md | 13
-rw-r--r--  vendor/github.com/spf13/afero/appveyor.yml | 2
-rw-r--r--  vendor/github.com/spf13/afero/cacheOnReadFs.go | 9
-rw-r--r--  vendor/github.com/spf13/afero/match.go | 110
-rw-r--r--  vendor/github.com/spf13/afero/mem/file.go | 40
-rw-r--r--  vendor/github.com/spf13/afero/memmap.go | 10
-rw-r--r--  vendor/github.com/spf13/afero/memradix.go | 14
-rw-r--r--  vendor/github.com/spf13/cast/cast.go | 6
-rw-r--r--  vendor/github.com/spf13/cast/caste.go | 29
-rw-r--r--  vendor/github.com/spf13/jwalterweatherman/log_counter.go | 1
-rw-r--r--  vendor/github.com/spf13/jwalterweatherman/notepad.go | 89
-rw-r--r--  vendor/github.com/spf13/pflag/README.md | 25
-rw-r--r--  vendor/github.com/spf13/pflag/count.go | 16
-rw-r--r--  vendor/github.com/spf13/pflag/flag.go | 230
-rw-r--r--  vendor/github.com/spf13/pflag/int16.go | 88
-rw-r--r--  vendor/github.com/spf13/viper/README.md | 50
-rw-r--r--  vendor/github.com/spf13/viper/viper.go | 86
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_format.go | 379
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl | 4
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertion_forward.go | 450
-rw-r--r--  vendor/github.com/stretchr/testify/assert/assertions.go | 229
-rw-r--r--  vendor/github.com/stretchr/testify/assert/forward_assertions.go | 2
-rw-r--r--  vendor/github.com/stretchr/testify/assert/http_assertions.go | 49
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/CHANGELOG.md | 5
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/README.md | 13
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/branches.go | 32
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/build_variables.go | 49
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/builds.go | 351
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/commits.go | 114
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/deploy_keys.go | 77
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/events.go | 343
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/feature_flags.go | 79
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/gitlab.go | 81
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/groups.go | 150
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/issues.go | 158
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/jobs.go | 349
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/labels.go | 29
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/merge_requests.go | 250
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/milestones.go | 28
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/namespaces.go | 16
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/notes.go | 114
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/notifications.go | 21
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/pipeline_triggers.go | 234
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/pipelines.go | 40
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/project_members.go | 179
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/project_snippets.go | 45
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/projects.go | 727
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/repositories.go | 52
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/repository_files.go | 105
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/services.go | 112
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/session.go | 14
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/settings.go | 84
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/strings.go | 2
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/system_hooks.go | 21
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/tags.go | 37
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/time_stats.go | 62
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/todos.go | 175
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/users.go | 265
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/version.go | 56
-rw-r--r--  vendor/github.com/xanzy/go-gitlab/wikis.go | 204
314 files changed, 18134 insertions, 6922 deletions
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
index 17fc76a..788fe6e 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go
@@ -2,7 +2,6 @@ package client
import (
"fmt"
- "net/http/httputil"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client/metadata"
@@ -46,7 +45,7 @@ func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, op
svc := &Client{
Config: cfg,
ClientInfo: info,
- Handlers: handlers,
+ Handlers: handlers.Copy(),
}
switch retryer, ok := cfg.Retryer.(request.Retryer); {
@@ -86,61 +85,6 @@ func (c *Client) AddDebugHandlers() {
return
}
- c.Handlers.Send.PushFront(logRequest)
- c.Handlers.Send.PushBack(logResponse)
-}
-
-const logReqMsg = `DEBUG: Request %s/%s Details:
----[ REQUEST POST-SIGN ]-----------------------------
-%s
------------------------------------------------------`
-
-const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
----[ REQUEST DUMP ERROR ]-----------------------------
-%s
------------------------------------------------------`
-
-func logRequest(r *request.Request) {
- logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
- dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
- if err != nil {
- r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
- return
- }
-
- if logBody {
- // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
- // Body as a NoOpCloser and will not be reset after read by the HTTP
- // client reader.
- r.ResetBody()
- }
-
- r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
-}
-
-const logRespMsg = `DEBUG: Response %s/%s Details:
----[ RESPONSE ]--------------------------------------
-%s
------------------------------------------------------`
-
-const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
----[ RESPONSE DUMP ERROR ]-----------------------------
-%s
------------------------------------------------------`
-
-func logResponse(r *request.Request) {
- var msg = "no response data"
- if r.HTTPResponse != nil {
- logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
- dumpedBody, err := httputil.DumpResponse(r.HTTPResponse, logBody)
- if err != nil {
- r.Config.Logger.Log(fmt.Sprintf(logRespErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
- return
- }
-
- msg = string(dumpedBody)
- } else if r.Error != nil {
- msg = r.Error.Error()
- }
- r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg))
+ c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest})
+ c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse})
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
index 43a3676..e25a460 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
@@ -15,11 +15,11 @@ import (
// the MaxRetries method:
//
// type retryer struct {
-// service.DefaultRetryer
+// client.DefaultRetryer
// }
//
// // This implementation always has 100 max retries
-// func (d retryer) MaxRetries() uint { return 100 }
+// func (d retryer) MaxRetries() int { return 100 }
type DefaultRetryer struct {
NumMaxRetries int
}
@@ -54,6 +54,12 @@ func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration {
// ShouldRetry returns true if the request should be retried.
func (d DefaultRetryer) ShouldRetry(r *request.Request) bool {
+ // If one of the other handlers already set the retry state
+ // we don't want to override it based on the service's state
+ if r.Retryable != nil {
+ return *r.Retryable
+ }
+
if r.HTTPResponse.StatusCode >= 500 {
return true
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
new file mode 100644
index 0000000..1f39c91
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go
@@ -0,0 +1,108 @@
+package client
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http/httputil"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
+)
+
+const logReqMsg = `DEBUG: Request %s/%s Details:
+---[ REQUEST POST-SIGN ]-----------------------------
+%s
+-----------------------------------------------------`
+
+const logReqErrMsg = `DEBUG ERROR: Request %s/%s:
+---[ REQUEST DUMP ERROR ]-----------------------------
+%s
+------------------------------------------------------`
+
+type logWriter struct {
+ // Logger is what we will use to log the payload of a response.
+ Logger aws.Logger
+ // buf stores the contents of what has been read
+ buf *bytes.Buffer
+}
+
+func (logger *logWriter) Write(b []byte) (int, error) {
+ return logger.buf.Write(b)
+}
+
+type teeReaderCloser struct {
+ // io.Reader will be a tee reader that is used during logging.
+ // This structure will read from a body and write the contents to a logger.
+ io.Reader
+ // Source is used just to close when we are done reading.
+ Source io.ReadCloser
+}
+
+func (reader *teeReaderCloser) Close() error {
+ return reader.Source.Close()
+}
+
+func logRequest(r *request.Request) {
+ logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
+ dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody)
+ if err != nil {
+ r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err))
+ return
+ }
+
+ if logBody {
+ // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
+ // Body as a NoOpCloser and will not be reset after read by the HTTP
+ // client reader.
+ r.ResetBody()
+ }
+
+ r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))
+}
+
+const logRespMsg = `DEBUG: Response %s/%s Details:
+---[ RESPONSE ]--------------------------------------
+%s
+-----------------------------------------------------`
+
+const logRespErrMsg = `DEBUG ERROR: Response %s/%s:
+---[ RESPONSE DUMP ERROR ]-----------------------------
+%s
+-----------------------------------------------------`
+
+func logResponse(r *request.Request) {
+ lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)}
+ r.HTTPResponse.Body = &teeReaderCloser{
+ Reader: io.TeeReader(r.HTTPResponse.Body, lw),
+ Source: r.HTTPResponse.Body,
+ }
+
+ handlerFn := func(req *request.Request) {
+ body, err := httputil.DumpResponse(req.HTTPResponse, false)
+ if err != nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
+ return
+ }
+
+ b, err := ioutil.ReadAll(lw.buf)
+ if err != nil {
+ lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err))
+ return
+ }
+ lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body)))
+ if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) {
+ lw.Logger.Log(string(b))
+ }
+ }
+
+ const handlerName = "awsdk.client.LogResponse.ResponseBody"
+
+ r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{
+ Name: handlerName, Fn: handlerFn,
+ })
+ r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{
+ Name: handlerName, Fn: handlerFn,
+ })
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
index 948e0a6..ae3a286 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -53,6 +53,13 @@ type Config struct {
// to use based on region.
EndpointResolver endpoints.Resolver
+ // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call
+ // ShouldRetry regardless of whether or not if request.Retryable is set.
+ // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck
+ // is not set, then ShouldRetry will only be called if request.Retryable is nil.
+ // Proper handling of the request.Retryable field is important when setting this field.
+ EnforceShouldRetryCheck *bool
+
// The region to send requests to. This parameter is required and must
// be configured globally or on a per-client basis unless otherwise
// noted. A full list of regions is found in the "Regions and Endpoints"
@@ -88,7 +95,7 @@ type Config struct {
// recoverable failures.
//
// When nil or the value does not implement the request.Retryer interface,
- // the request.DefaultRetryer will be used.
+ // the client.DefaultRetryer will be used.
//
// When both Retryer and MaxRetries are non-nil, the former is used and
// the latter ignored.
@@ -443,6 +450,10 @@ func mergeInConfig(dst *Config, other *Config) {
if other.DisableRestProtocolURICleaning != nil {
dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning
}
+
+ if other.EnforceShouldRetryCheck != nil {
+ dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck
+ }
}
// Copy will return a shallow copy of the Config object. If any additional
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
index e8cf93d..8fdda53 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
@@ -4,9 +4,9 @@ package aws
import "time"
-// An emptyCtx is a copy of the the Go 1.7 context.emptyCtx type. This
-// is copied to provide a 1.6 and 1.5 safe version of context that is compatible
-// with Go 1.7's Context.
+// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
+// provide a 1.6 and 1.5 safe version of context that is compatible with Go
+// 1.7's Context.
//
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
index 3b73a7d..ff5d58e 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go
@@ -311,6 +311,24 @@ func TimeValue(v *time.Time) time.Time {
return time.Time{}
}
+// SecondsTimeValue converts an int64 pointer to a time.Time value
+// representing seconds since Epoch or time.Time{} if the pointer is nil.
+func SecondsTimeValue(v *int64) time.Time {
+ if v != nil {
+ return time.Unix((*v / 1000), 0)
+ }
+ return time.Time{}
+}
+
+// MillisecondsTimeValue converts an int64 pointer to a time.Time value
+// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil.
+func MillisecondsTimeValue(v *int64) time.Time {
+ if v != nil {
+ return time.Unix(0, (*v * 1000000))
+ }
+ return time.Time{}
+}
+
// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
// The result is undefined if the Unix time cannot be represented by an int64.
// Which includes calling TimeUnixMilli on a zero Time is undefined.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
index 08a6665..495e3ef 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -27,7 +27,7 @@ type lener interface {
// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable
// to determine request body length and no "Content-Length" was specified it will panic.
//
-// The Content-Length will only be aded to the request if the length of the body
+// The Content-Length will only be added to the request if the length of the body
// is greater than 0. If the body is empty or the current `Content-Length`
// header is <= 0, the header will also be stripped.
var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) {
@@ -71,8 +71,8 @@ var reStatusCode = regexp.MustCompile(`^(\d{3})`)
// ValidateReqSigHandler is a request handler to ensure that the request's
// signature doesn't expire before it is sent. This can happen when a request
-// is built and signed signficantly before it is sent. Or significant delays
-// occur whne retrying requests that would cause the signature to expire.
+// is built and signed significantly before it is sent. Or significant delays
+// occur when retrying requests that would cause the signature to expire.
var ValidateReqSigHandler = request.NamedHandler{
Name: "core.ValidateReqSigHandler",
Fn: func(r *request.Request) {
@@ -98,54 +98,95 @@ var ValidateReqSigHandler = request.NamedHandler{
}
// SendHandler is a request handler to send service request using HTTP client.
-var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) {
- var err error
- r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest)
- if err != nil {
- // Prevent leaking if an HTTPResponse was returned. Clean up
- // the body.
- if r.HTTPResponse != nil {
- r.HTTPResponse.Body.Close()
+var SendHandler = request.NamedHandler{
+ Name: "core.SendHandler",
+ Fn: func(r *request.Request) {
+ sender := sendFollowRedirects
+ if r.DisableFollowRedirects {
+ sender = sendWithoutFollowRedirects
}
- // Capture the case where url.Error is returned for error processing
- // response. e.g. 301 without location header comes back as string
- // error and r.HTTPResponse is nil. Other url redirect errors will
- // comeback in a similar method.
- if e, ok := err.(*url.Error); ok && e.Err != nil {
- if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
- code, _ := strconv.ParseInt(s[1], 10, 64)
- r.HTTPResponse = &http.Response{
- StatusCode: int(code),
- Status: http.StatusText(int(code)),
- Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
- }
- return
- }
+
+ if request.NoBody == r.HTTPRequest.Body {
+ // Strip off the request body if the NoBody reader was used as a
+ // place holder for a request body. This prevents the SDK from
+ // making requests with a request body when it would be invalid
+ // to do so.
+ //
+ // Use a shallow copy of the http.Request to ensure the race condition
+ // of transport on Body will not trigger
+ reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest
+ reqCopy.Body = nil
+ r.HTTPRequest = &reqCopy
+ defer func() {
+ r.HTTPRequest = reqOrig
+ }()
}
- if r.HTTPResponse == nil {
- // Add a dummy request response object to ensure the HTTPResponse
- // value is consistent.
+
+ var err error
+ r.HTTPResponse, err = sender(r)
+ if err != nil {
+ handleSendError(r, err)
+ }
+ },
+}
+
+func sendFollowRedirects(r *request.Request) (*http.Response, error) {
+ return r.Config.HTTPClient.Do(r.HTTPRequest)
+}
+
+func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) {
+ transport := r.Config.HTTPClient.Transport
+ if transport == nil {
+ transport = http.DefaultTransport
+ }
+
+ return transport.RoundTrip(r.HTTPRequest)
+}
+
+func handleSendError(r *request.Request, err error) {
+ // Prevent leaking if an HTTPResponse was returned. Clean up
+ // the body.
+ if r.HTTPResponse != nil {
+ r.HTTPResponse.Body.Close()
+ }
+ // Capture the case where url.Error is returned for error processing
+ // response. e.g. 301 without location header comes back as string
+ // error and r.HTTPResponse is nil. Other URL redirect errors will
+ // comeback in a similar method.
+ if e, ok := err.(*url.Error); ok && e.Err != nil {
+ if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil {
+ code, _ := strconv.ParseInt(s[1], 10, 64)
r.HTTPResponse = &http.Response{
- StatusCode: int(0),
- Status: http.StatusText(int(0)),
+ StatusCode: int(code),
+ Status: http.StatusText(int(code)),
Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
+ return
}
- // Catch all other request errors.
- r.Error = awserr.New("RequestError", "send request failed", err)
- r.Retryable = aws.Bool(true) // network errors are retryable
-
- // Override the error with a context canceled error, if that was canceled.
- ctx := r.Context()
- select {
- case <-ctx.Done():
- r.Error = awserr.New(request.CanceledErrorCode,
- "request context canceled", ctx.Err())
- r.Retryable = aws.Bool(false)
- default:
+ }
+ if r.HTTPResponse == nil {
+ // Add a dummy request response object to ensure the HTTPResponse
+ // value is consistent.
+ r.HTTPResponse = &http.Response{
+ StatusCode: int(0),
+ Status: http.StatusText(int(0)),
+ Body: ioutil.NopCloser(bytes.NewReader([]byte{})),
}
}
-}}
+ // Catch all other request errors.
+ r.Error = awserr.New("RequestError", "send request failed", err)
+ r.Retryable = aws.Bool(true) // network errors are retryable
+
+ // Override the error with a context canceled error, if that was canceled.
+ ctx := r.Context()
+ select {
+ case <-ctx.Done():
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", ctx.Err())
+ r.Retryable = aws.Bool(false)
+ default:
+ }
+}
// ValidateResponseHandler is a request handler to validate service response.
var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) {
@@ -160,7 +201,7 @@ var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseH
var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) {
// If one of the other handlers already set the retry state
// we don't want to override it based on the service's state
- if r.Retryable == nil {
+ if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) {
r.Retryable = aws.Bool(r.ShouldRetry(r))
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
index 6efc77b..f298d65 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
@@ -13,7 +13,7 @@ var (
//
// @readonly
ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders",
- `no valid providers in chain. Deprecated.
+ `no valid providers in chain. Deprecated.
For verbose messaging see aws.Config.CredentialsChainVerboseErrors`,
nil)
)
@@ -39,16 +39,18 @@ var (
// does not return any credentials ChainProvider will return the error
// ErrNoValidProvidersFoundInChain
//
-// creds := NewChainCredentials(
-// []Provider{
-// &EnvProvider{},
-// &EC2RoleProvider{
+// creds := credentials.NewChainCredentials(
+// []credentials.Provider{
+// &credentials.EnvProvider{},
+// &ec2rolecreds.EC2RoleProvider{
// Client: ec2metadata.New(sess),
// },
// })
//
// // Usage of ChainCredentials with aws.Config
-// svc := ec2.New(&aws.Config{Credentials: creds})
+// svc := ec2.New(session.Must(session.NewSession(&aws.Config{
+// Credentials: creds,
+// })))
//
type ChainProvider struct {
Providers []Provider
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
index c29baf0..42416fc 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -14,7 +14,7 @@
//
// Example of using the environment variable credentials.
//
-// creds := NewEnvCredentials()
+// creds := credentials.NewEnvCredentials()
//
// // Retrieve the credentials value
// credValue, err := creds.Get()
@@ -26,7 +26,7 @@
// This may be helpful to proactively expire credentials and refresh them sooner
// than they would naturally expire on their own.
//
-// creds := NewCredentials(&EC2RoleProvider{})
+// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{})
// creds.Expire()
// credsValue, err := creds.Get()
// // New credentials will be retrieved instead of from cache.
@@ -43,7 +43,7 @@
// func (m *MyProvider) Retrieve() (Value, error) {...}
// func (m *MyProvider) IsExpired() bool {...}
//
-// creds := NewCredentials(&MyProvider{})
+// creds := credentials.NewCredentials(&MyProvider{})
// credValue, err := creds.Get()
//
package credentials
@@ -60,7 +60,9 @@ import (
// when making service API calls. For example, when accessing public
// s3 buckets.
//
-// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials})
+// svc := s3.New(session.Must(session.NewSession(&aws.Config{
+// Credentials: credentials.AnonymousCredentials,
+// })))
// // Access public S3 buckets.
//
// @readonly
@@ -97,6 +99,27 @@ type Provider interface {
IsExpired() bool
}
+// An ErrorProvider is a stub credentials provider that always returns an error
+// this is used by the SDK when construction a known provider is not possible
+// due to an error.
+type ErrorProvider struct {
+ // The error to be returned from Retrieve
+ Err error
+
+ // The provider name to set on the Retrieved returned Value
+ ProviderName string
+}
+
+// Retrieve will always return the error that the ErrorProvider was created with.
+func (p ErrorProvider) Retrieve() (Value, error) {
+ return Value{ProviderName: p.ProviderName}, p.Err
+}
+
+// IsExpired will always return not expired.
+func (p ErrorProvider) IsExpired() bool {
+ return false
+}
+
// A Expiry provides shared expiration logic to be used by credentials
// providers to implement expiry functionality.
//
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
index 96655bc..c14231a 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
@@ -29,6 +29,7 @@ var (
// Environment variables used:
//
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
+//
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
type EnvProvider struct {
retrieved bool
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
index 7fb7cbf..51e21e0 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
@@ -3,11 +3,11 @@ package credentials
import (
"fmt"
"os"
- "path/filepath"
"github.com/go-ini/ini"
"github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
)
// SharedCredsProviderName provides a name of SharedCreds provider
@@ -15,8 +15,6 @@ const SharedCredsProviderName = "SharedCredentialsProvider"
var (
// ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found.
- //
- // @readonly
ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil)
)
@@ -117,22 +115,23 @@ func loadProfile(filename, profile string) (Value, error) {
//
// Will return an error if the user's home directory path cannot be found.
func (p *SharedCredentialsProvider) filename() (string, error) {
- if p.Filename == "" {
- if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" {
- return p.Filename, nil
- }
-
- homeDir := os.Getenv("HOME") // *nix
- if homeDir == "" { // Windows
- homeDir = os.Getenv("USERPROFILE")
- }
- if homeDir == "" {
- return "", ErrSharedCredentialsHomeNotFound
- }
-
- p.Filename = filepath.Join(homeDir, ".aws", "credentials")
+ if len(p.Filename) != 0 {
+ return p.Filename, nil
+ }
+
+ if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(p.Filename) != 0 {
+ return p.Filename, nil
}
+ if home := shareddefaults.UserHomeDir(); len(home) == 0 {
+ // Backwards compatibility of home directly not found error being returned.
+ // This error is too verbose, failure when opening the file would of been
+ // a better error to return.
+ return "", ErrSharedCredentialsHomeNotFound
+ }
+
+ p.Filename = shareddefaults.SharedCredentialsFilename()
+
return p.Filename, nil
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
index b840623..4108e43 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -12,7 +12,7 @@ between multiple Credentials, Sessions or service clients.
Assume Role
To assume an IAM role using STS with the SDK you can create a new Credentials
-with the SDKs's stscreds package.
+with the SDKs's stscreds package.
// Initial credentials loaded from SDK's default credential chain. Such as
// the environment, shared credentials (~/.aws/credentials), or EC2 Instance
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
index 110ca83..07afe3b 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -10,10 +10,12 @@ package defaults
import (
"fmt"
"net/http"
+ "net/url"
"os"
"time"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
@@ -96,23 +98,51 @@ func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credenti
})
}
-// RemoteCredProvider returns a credenitials provider for the default remote
+const (
+ httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI"
+ ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI"
+)
+
+// RemoteCredProvider returns a credentials provider for the default remote
// endpoints such as EC2 or ECS Roles.
func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
- ecsCredURI := os.Getenv("AWS_CONTAINER_CREDENTIALS_RELATIVE_URI")
+ if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
+ return localHTTPCredProvider(cfg, handlers, u)
+ }
- if len(ecsCredURI) > 0 {
- return ecsCredProvider(cfg, handlers, ecsCredURI)
+ if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 {
+ u := fmt.Sprintf("http://169.254.170.2%s", uri)
+ return httpCredProvider(cfg, handlers, u)
}
return ec2RoleProvider(cfg, handlers)
}
-func ecsCredProvider(cfg aws.Config, handlers request.Handlers, uri string) credentials.Provider {
- const host = `169.254.170.2`
+func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+ var errMsg string
+
+ parsed, err := url.Parse(u)
+ if err != nil {
+ errMsg = fmt.Sprintf("invalid URL, %v", err)
+ } else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") {
+ errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host)
+ }
+
+ if len(errMsg) > 0 {
+ if cfg.Logger != nil {
+ cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
+ }
+ return credentials.ErrorProvider{
+ Err: awserr.New("CredentialsEndpointError", errMsg, err),
+ ProviderName: endpointcreds.ProviderName,
+ }
+ }
+
+ return httpCredProvider(cfg, handlers, u)
+}
- return endpointcreds.NewProviderClient(cfg, handlers,
- fmt.Sprintf("http://%s%s", host, uri),
+func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+ return endpointcreds.NewProviderClient(cfg, handlers, u,
func(p *endpointcreds.Provider) {
p.ExpiryWindow = 5 * time.Minute
},
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
new file mode 100644
index 0000000..ca0ee1d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/shared_config.go
@@ -0,0 +1,27 @@
+package defaults
+
+import (
+ "github.com/aws/aws-sdk-go/internal/shareddefaults"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return shareddefaults.SharedCredentialsFilename()
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return shareddefaults.SharedConfigFilename()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644
index 0000000..4fcb616
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operations parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// API use. These utilities make getting a pointer of the scalar, and dereferencing
+// a pointer easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to value will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions will be named after the scalar type. So get a
+// *string from a string value use the "String" function. This makes it easy to
+// to get pointer of a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+// var strPtr *string
+//
+// // Without the SDK's conversion functions
+// str := "my string"
+// strPtr = &str
+//
+// // With the SDK's conversion functions
+// strPtr = aws.String("my string")
+//
+// // Convert *string to string value
+// str = aws.StringValue(strPtr)
+//
+// In addition to scalars the aws package also includes conversion utilities for
+// map and slice for commonly types used in API parameters. The map and slice
+// conversion functions use similar naming pattern as the scalar conversion
+// functions.
+//
+// var strPtrs []*string
+// var strs []string = []string{"Go", "Gophers", "Go"}
+//
+// // Convert []string to []*string
+// strPtrs = aws.StringSlice(strs)
+//
+// // Convert []*string to []string
+// strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if a HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 4adca3a..1769700 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -47,6 +47,7 @@ const (
ApigatewayServiceID = "apigateway" // Apigateway.
ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling.
Appstream2ServiceID = "appstream2" // Appstream2.
+ AthenaServiceID = "athena" // Athena.
AutoscalingServiceID = "autoscaling" // Autoscaling.
BatchServiceID = "batch" // Batch.
BudgetsServiceID = "budgets" // Budgets.
@@ -54,12 +55,14 @@ const (
CloudformationServiceID = "cloudformation" // Cloudformation.
CloudfrontServiceID = "cloudfront" // Cloudfront.
CloudhsmServiceID = "cloudhsm" // Cloudhsm.
+ Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
CloudsearchServiceID = "cloudsearch" // Cloudsearch.
CloudtrailServiceID = "cloudtrail" // Cloudtrail.
CodebuildServiceID = "codebuild" // Codebuild.
CodecommitServiceID = "codecommit" // Codecommit.
CodedeployServiceID = "codedeploy" // Codedeploy.
CodepipelineServiceID = "codepipeline" // Codepipeline.
+ CodestarServiceID = "codestar" // Codestar.
CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity.
CognitoIdpServiceID = "cognito-idp" // CognitoIdp.
CognitoSyncServiceID = "cognito-sync" // CognitoSync.
@@ -83,11 +86,14 @@ const (
ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce.
ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder.
EmailServiceID = "email" // Email.
+ EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace.
EsServiceID = "es" // Es.
EventsServiceID = "events" // Events.
FirehoseServiceID = "firehose" // Firehose.
GameliftServiceID = "gamelift" // Gamelift.
GlacierServiceID = "glacier" // Glacier.
+ GlueServiceID = "glue" // Glue.
+ GreengrassServiceID = "greengrass" // Greengrass.
HealthServiceID = "health" // Health.
IamServiceID = "iam" // Iam.
ImportexportServiceID = "importexport" // Importexport.
@@ -102,7 +108,9 @@ const (
MachinelearningServiceID = "machinelearning" // Machinelearning.
MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
+ MghServiceID = "mgh" // Mgh.
MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
+ ModelsLexServiceID = "models.lex" // ModelsLex.
MonitoringServiceID = "monitoring" // Monitoring.
MturkRequesterServiceID = "mturk-requester" // MturkRequester.
OpsworksServiceID = "opsworks" // Opsworks.
@@ -142,17 +150,20 @@ const (
// DefaultResolver returns an Endpoint resolver that will be able
// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US).
//
-// Casting the return value of this func to a EnumPartitions will
-// allow you to get a list of the partitions in the order the endpoints
-// will be resolved in.
+// Use DefaultPartitions() to get the list of the default partitions.
+func DefaultResolver() Resolver {
+ return defaultPartitions
+}
+
+// DefaultPartitions returns a list of the partitions the SDK is bundled
+// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US).
//
-// resolver := endpoints.DefaultResolver()
-// partitions := resolver.(endpoints.EnumPartitions).Partitions()
+// partitions := endpoints.DefaultPartitions
// for _, p := range partitions {
// // ... inspect partitions
// }
-func DefaultResolver() Resolver {
- return defaultPartitions
+func DefaultPartitions() []Partition {
+ return defaultPartitions.Partitions()
}
var defaultPartitions = partitions{
@@ -253,9 +264,11 @@ var awsPartition = partition{
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -301,6 +314,17 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "athena": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"autoscaling": service{
Defaults: endpoint{
Protocols: []string{"http", "https"},
@@ -325,7 +349,15 @@ var awsPartition = partition{
"batch": service{
Endpoints: endpoints{
- "us-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"budgets": service{
@@ -347,6 +379,7 @@ var awsPartition = partition{
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
@@ -400,6 +433,16 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "cloudhsmv2": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"cloudsearch": service{
Endpoints: endpoints{
@@ -440,20 +483,33 @@ var awsPartition = partition{
"ap-northeast-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
"codecommit": service{
Endpoints: endpoints{
- "eu-west-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-2": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
},
},
"codedeploy": service{
@@ -479,13 +535,32 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "codestar": service{
+
+ Endpoints: endpoints{
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -494,6 +569,8 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
@@ -508,6 +585,8 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
@@ -522,6 +601,8 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
@@ -620,11 +701,16 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
+ "us-east-2": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -692,6 +778,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
@@ -762,6 +849,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
@@ -770,7 +858,7 @@ var awsPartition = partition{
},
"elasticloadbalancing": service{
Defaults: endpoint{
- Protocols: []string{"http", "https"},
+ Protocols: []string{"https"},
},
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
@@ -836,6 +924,16 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "entitlement.marketplace": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "aws-marketplace",
+ },
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
"es": service{
Endpoints: endpoints{
@@ -844,8 +942,10 @@ var awsPartition = partition{
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
@@ -875,9 +975,12 @@ var awsPartition = partition{
"firehose": service{
Endpoints: endpoints{
- "eu-west-1": endpoint{},
- "us-east-1": endpoint{},
- "us-west-2": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"gamelift": service{
@@ -887,10 +990,15 @@ var awsPartition = partition{
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -902,6 +1010,7 @@ var awsPartition = partition{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
@@ -913,6 +1022,27 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "glue": service{
+
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
+ "greengrass": service{
+ IsRegionalized: boxedTrue,
+ Defaults: endpoint{
+ Protocols: []string{"https"},
+ },
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"health": service{
Endpoints: endpoints{
@@ -956,6 +1086,7 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -1032,9 +1163,11 @@ var awsPartition = partition{
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -1044,7 +1177,16 @@ var awsPartition = partition{
"lightsail": service{
Endpoints: endpoints{
- "us-east-1": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"logs": service{
@@ -1102,12 +1244,28 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "mgh": service{
+
+ Endpoints: endpoints{
+ "us-west-2": endpoint{},
+ },
+ },
"mobileanalytics": service{
Endpoints: endpoints{
"us-east-1": endpoint{},
},
},
+ "models.lex": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "lex",
+ },
+ },
+ Endpoints: endpoints{
+ "us-east-1": endpoint{},
+ },
+ },
"monitoring": service{
Defaults: endpoint{
Protocols: []string{"http", "https"},
@@ -1354,6 +1512,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
@@ -1378,19 +1537,27 @@ var awsPartition = partition{
"sms": service{
Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"snowball": service{
Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -1447,10 +1614,13 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
@@ -1462,8 +1632,10 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
@@ -1474,6 +1646,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
@@ -1489,7 +1662,7 @@ var awsPartition = partition{
},
"streams.dynamodb": service{
Defaults: endpoint{
- Protocols: []string{"http", "http", "https", "https"},
+ Protocols: []string{"http", "https"},
CredentialScope: credentialScope{
Service: "dynamodb",
},
@@ -1544,9 +1717,33 @@ var awsPartition = partition{
"eu-west-2": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-1": endpoint{},
- "us-west-2": endpoint{},
+ "us-east-1-fips": endpoint{
+ Hostname: "sts-fips.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ "us-east-2": endpoint{},
+ "us-east-2-fips": endpoint{
+ Hostname: "sts-fips.us-east-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-2",
+ },
+ },
+ "us-west-1": endpoint{},
+ "us-west-1-fips": endpoint{
+ Hostname: "sts-fips.us-west-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-1",
+ },
+ },
+ "us-west-2": endpoint{},
+ "us-west-2-fips": endpoint{
+ Hostname: "sts-fips.us-west-2.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-west-2",
+ },
+ },
},
},
"support": service{
@@ -1612,6 +1809,7 @@ var awsPartition = partition{
"ap-northeast-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
+ "us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -1634,6 +1832,7 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-west-2": endpoint{},
},
@@ -1646,8 +1845,10 @@ var awsPartition = partition{
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
@@ -1684,6 +1885,18 @@ var awscnPartition = partition{
},
},
Services: services{
+ "application-autoscaling": service{
+ Defaults: endpoint{
+ Hostname: "autoscaling.{region}.amazonaws.com",
+ Protocols: []string{"http", "https"},
+ CredentialScope: credentialScope{
+ Service: "application-autoscaling",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
"autoscaling": service{
Defaults: endpoint{
Protocols: []string{"http", "https"},
@@ -1749,6 +1962,18 @@ var awscnPartition = partition{
},
},
},
+ "ecr": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
+ "ecs": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
"elasticache": service{
Endpoints: endpoints{
@@ -1763,7 +1988,7 @@ var awscnPartition = partition{
},
"elasticloadbalancing": service{
Defaults: endpoint{
- Protocols: []string{"http", "https"},
+ Protocols: []string{"https"},
},
Endpoints: endpoints{
"cn-north-1": endpoint{},
@@ -1804,6 +2029,16 @@ var awscnPartition = partition{
},
},
},
+ "iot": service{
+ Defaults: endpoint{
+ CredentialScope: credentialScope{
+ Service: "execute-api",
+ },
+ },
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
"kinesis": service{
Endpoints: endpoints{
@@ -1845,6 +2080,12 @@ var awscnPartition = partition{
"cn-north-1": endpoint{},
},
},
+ "snowball": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
"sns": service{
Defaults: endpoint{
Protocols: []string{"http", "https"},
@@ -1862,6 +2103,12 @@ var awscnPartition = partition{
"cn-north-1": endpoint{},
},
},
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
"storagegateway": service{
Endpoints: endpoints{
@@ -1870,7 +2117,7 @@ var awscnPartition = partition{
},
"streams.dynamodb": service{
Defaults: endpoint{
- Protocols: []string{"http", "http", "https", "https"},
+ Protocols: []string{"http", "https"},
CredentialScope: credentialScope{
Service: "dynamodb",
},
@@ -1926,6 +2173,18 @@ var awsusgovPartition = partition{
},
},
Services: services{
+ "acm": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
+ "apigateway": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
"autoscaling": service{
Endpoints: endpoints{
@@ -1952,6 +2211,12 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
"config": service{
Endpoints: endpoints{
@@ -2009,6 +2274,12 @@ var awsusgovPartition = partition{
},
},
},
+ "events": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
"glacier": service{
Endpoints: endpoints{
@@ -2042,6 +2313,12 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "lambda": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
"logs": service{
Endpoints: endpoints{
@@ -2066,6 +2343,12 @@ var awsusgovPartition = partition{
"us-gov-west-1": endpoint{},
},
},
+ "rekognition": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
"s3": service{
Defaults: endpoint{
SignatureVersions: []string{"s3", "s3v4"},
@@ -2083,6 +2366,12 @@ var awsusgovPartition = partition{
},
},
},
+ "sms": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
"snowball": service{
Endpoints: endpoints{
@@ -2106,6 +2395,12 @@ var awsusgovPartition = partition{
},
},
},
+ "ssm": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
"streams.dynamodb": service{
Defaults: endpoint{
CredentialScope: credentialScope{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
index a0e9bc4..84316b9 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go
@@ -21,12 +21,12 @@
// partitions := resolver.(endpoints.EnumPartitions).Partitions()
//
// for _, p := range partitions {
-// fmt.Println("Regions for", p.Name)
+// fmt.Println("Regions for", p.ID())
// for id, _ := range p.Regions() {
// fmt.Println("*", id)
// }
//
-// fmt.Println("Services for", p.Name)
+// fmt.Println("Services for", p.ID())
// for id, _ := range p.Services() {
// fmt.Println("*", id)
// }
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
index 37e19ab..9c3eedb 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go
@@ -124,6 +124,49 @@ type EnumPartitions interface {
Partitions() []Partition
}
+// RegionsForService returns a map of regions for the partition and service.
+// If either the partition or service does not exist, false is returned as
+// the second return value.
+//
+// This example shows how to get the regions for DynamoDB in the AWS partition.
+// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID)
+//
+// This is equivalent to using the partition directly.
+// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions()
+func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) {
+ for _, p := range ps {
+ if p.ID() != partitionID {
+ continue
+ }
+ if _, ok := p.p.Services[serviceID]; !ok {
+ break
+ }
+
+ s := Service{
+ id: serviceID,
+ p: p.p,
+ }
+ return s.Regions(), true
+ }
+
+ return map[string]Region{}, false
+}
+
+// PartitionForRegion returns the first partition which includes the region
+// passed in. This includes both known regions and regions which match
+// a pattern supported by the partition which may include regions that are
+// not explicitly known by the partition. Use the Regions method of the
+// returned Partition if explicit support is needed.
+func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) {
+ for _, p := range ps {
+ if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) {
+ return p, true
+ }
+ }
+
+ return Partition{}, false
+}
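
A short usage sketch of the two new helpers (the IDs are the generated constants from defaults.go above; not part of the diff):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
    ps := endpoints.DefaultPartitions()

    // PartitionForRegion also matches regions via the partition's region regex.
    if p, ok := endpoints.PartitionForRegion(ps, "eu-west-2"); ok {
        fmt.Println("eu-west-2 belongs to partition", p.ID())
    }

    // RegionsForService reports false when the partition or service is unknown.
    if rs, ok := endpoints.RegionsForService(ps, endpoints.AwsPartitionID, endpoints.DynamodbServiceID); ok {
        for id := range rs {
            fmt.Println("dynamodb is available in", id)
        }
    }
}
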
+
// A Partition provides the ability to enumerate the partition's regions
// and services.
type Partition struct {
@@ -132,7 +175,7 @@ type Partition struct {
}
// ID returns the identifier of the partition.
-func (p *Partition) ID() string { return p.id }
+func (p Partition) ID() string { return p.id }
// EndpointFor attempts to resolve the endpoint based on service and region.
// See Options for information on configuring how the endpoint is resolved.
@@ -155,13 +198,13 @@ func (p *Partition) ID() string { return p.id }
// Errors that can be returned.
// * UnknownServiceError
// * UnknownEndpointError
-func (p *Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
return p.p.EndpointFor(service, region, opts...)
}
// Regions returns a map of Regions indexed by their ID. This is useful for
// enumerating over the regions in a partition.
-func (p *Partition) Regions() map[string]Region {
+func (p Partition) Regions() map[string]Region {
rs := map[string]Region{}
for id := range p.p.Regions {
rs[id] = Region{
@@ -175,7 +218,7 @@ func (p *Partition) Regions() map[string]Region {
// Services returns a map of Service indexed by their ID. This is useful for
// enumerating over the services in a partition.
-func (p *Partition) Services() map[string]Service {
+func (p Partition) Services() map[string]Service {
ss := map[string]Service{}
for id := range p.p.Services {
ss[id] = Service{
@@ -195,16 +238,16 @@ type Region struct {
}
// ID returns the region's identifier.
-func (r *Region) ID() string { return r.id }
+func (r Region) ID() string { return r.id }
// ResolveEndpoint resolves an endpoint from the context of the region given
// a service. See Partition.EndpointFor for usage and errors that can be returned.
-func (r *Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) {
return r.p.EndpointFor(service, r.id, opts...)
}
// Services returns a list of all services that are known to be in this region.
-func (r *Region) Services() map[string]Service {
+func (r Region) Services() map[string]Service {
ss := map[string]Service{}
for id, s := range r.p.Services {
if _, ok := s.Endpoints[r.id]; ok {
@@ -226,17 +269,38 @@ type Service struct {
}
// ID returns the identifier for the service.
-func (s *Service) ID() string { return s.id }
+func (s Service) ID() string { return s.id }
// ResolveEndpoint resolves an endpoint from the context of a service given
// a region. See Partition.EndpointFor for usage and errors that can be returned.
-func (s *Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
+func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) {
return s.p.EndpointFor(s.id, region, opts...)
}
+// Regions returns a map of Regions that the service is present in.
+//
+// A Region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of the service.
+func (s Service) Regions() map[string]Region {
+ rs := map[string]Region{}
+ for id := range s.p.Services[s.id].Endpoints {
+ if _, ok := s.p.Regions[id]; ok {
+ rs[id] = Region{
+ id: id,
+ p: s.p,
+ }
+ }
+ }
+
+ return rs
+}
+
// Endpoints returns a map of Endpoints indexed by their ID for all known
// endpoints for a service.
-func (s *Service) Endpoints() map[string]Endpoint {
+//
+// A Region is the AWS region the service exists in, whereas an Endpoint is
+// a URL that can be resolved to an instance of the service.
+func (s Service) Endpoints() map[string]Endpoint {
es := map[string]Endpoint{}
for id := range s.p.Services[s.id].Endpoints {
es[id] = Endpoint{
@@ -259,15 +323,15 @@ type Endpoint struct {
}
// ID returns the identifier for an endpoint.
-func (e *Endpoint) ID() string { return e.id }
+func (e Endpoint) ID() string { return e.id }
// ServiceID returns the identifier the endpoint belongs to.
-func (e *Endpoint) ServiceID() string { return e.serviceID }
+func (e Endpoint) ServiceID() string { return e.serviceID }
// ResolveEndpoint resolves an endpoint from the context of a service and
// region the endpoint represents. See Partition.EndpointFor for usage and
// errors that can be returned.
-func (e *Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
+func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) {
return e.p.EndpointFor(e.serviceID, e.id, opts...)
}
@@ -300,28 +364,6 @@ type EndpointNotFoundError struct {
Region string
}
-//// NewEndpointNotFoundError builds and returns NewEndpointNotFoundError.
-//func NewEndpointNotFoundError(p, s, r string) EndpointNotFoundError {
-// return EndpointNotFoundError{
-// awsError: awserr.New("EndpointNotFoundError", "unable to find endpoint", nil),
-// Partition: p,
-// Service: s,
-// Region: r,
-// }
-//}
-//
-//// Error returns string representation of the error.
-//func (e EndpointNotFoundError) Error() string {
-// extra := fmt.Sprintf("partition: %q, service: %q, region: %q",
-// e.Partition, e.Service, e.Region)
-// return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr())
-//}
-//
-//// String returns the string representation of the error.
-//func (e EndpointNotFoundError) String() string {
-// return e.Error()
-//}
-
// A UnknownServiceError is returned when the service does not resolve to an
// endpoint. Includes a list of all known services for the partition. Returned
// when a partition does not support the service.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
index fc7eada..05e92df 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
@@ -209,17 +209,20 @@ import (
// DefaultResolver returns an Endpoint resolver that will be able
// to resolve endpoints for: {{ ListPartitionNames . }}.
//
- // Casting the return value of this func to a EnumPartitions will
- // allow you to get a list of the partitions in the order the endpoints
- // will be resolved in.
+ // Use DefaultPartitions() to get the list of the default partitions.
+ func DefaultResolver() Resolver {
+ return defaultPartitions
+ }
+
+ // DefaultPartitions returns a list of the partitions the SDK is bundled
+ // with. The available partitions are: {{ ListPartitionNames . }}.
//
- // resolver := endpoints.DefaultResolver()
- // partitions := resolver.(endpoints.EnumPartitions).Partitions()
+	//    partitions := endpoints.DefaultPartitions()
// for _, p := range partitions {
// // ... inspect partitions
// }
- func DefaultResolver() Resolver {
- return defaultPartitions
+ func DefaultPartitions() []Partition {
+ return defaultPartitions.Partitions()
}
var defaultPartitions = partitions{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
index a94f041..91a6f27 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
@@ -4,7 +4,8 @@ package aws
// into a json string. This type can be used just like any other map.
//
// Example:
-// values := JSONValue{
+//
+// values := aws.JSONValue{
// "Foo": "Bar",
// }
// values["Baz"] = "Qux"
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
index db87188..3babb5a 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/logger.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go
@@ -26,14 +26,14 @@ func (l *LogLevelType) Value() LogLevelType {
// Matches returns true if the v LogLevel is enabled by this LogLevel. Should be
// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If
-// LogLevel is nill, will default to LogOff comparison.
+// LogLevel is nil, will default to LogOff comparison.
func (l *LogLevelType) Matches(v LogLevelType) bool {
c := l.Value()
return c&v == v
}
// AtLeast returns true if this LogLevel is at least high enough to satisfy v.
-// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default
+// Is safe to use on nil value LogLevelTypes. If LogLevel is nil, will default
// to LogOff comparison.
func (l *LogLevelType) AtLeast(v LogLevelType) bool {
c := l.Value()
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
new file mode 100644
index 0000000..271da43
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go
@@ -0,0 +1,19 @@
+// +build !appengine,!plan9
+
+package request
+
+import (
+ "net"
+ "os"
+ "syscall"
+)
+
+func isErrConnectionReset(err error) bool {
+ if opErr, ok := err.(*net.OpError); ok {
+ if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
+ return sysErr.Err == syscall.ECONNRESET
+ }
+ }
+
+ return false
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
index 6c14336..802ac88 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -158,6 +158,37 @@ func (l *HandlerList) RemoveByName(name string) {
}
}
+// SwapNamed will swap out any existing handlers with the same name as the
+// passed in NamedHandler, returning true if any handlers were swapped and
+// false otherwise.
+func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) {
+ for i := 0; i < len(l.list); i++ {
+ if l.list[i].Name == n.Name {
+ l.list[i].Fn = n.Fn
+ swapped = true
+ }
+ }
+
+ return swapped
+}
+
+// SetBackNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the end of the list.
+func (l *HandlerList) SetBackNamed(n NamedHandler) {
+ if !l.SwapNamed(n) {
+ l.PushBackNamed(n)
+ }
+}
+
+// SetFrontNamed will replace the named handler if it exists in the handler list.
+// If the handler does not exist the handler will be added to the beginning of
+// the list.
+func (l *HandlerList) SetFrontNamed(n NamedHandler) {
+ if !l.SwapNamed(n) {
+ l.PushFrontNamed(n)
+ }
+}
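
A hedged sketch of SetBackNamed replacing the RemoveByName-plus-PushBackNamed pattern; the handler name and header below are illustrative only:

package main

import (
    "github.com/aws/aws-sdk-go/aws/request"
    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    sess := session.Must(session.NewSession())

    // SetBackNamed swaps an existing handler with the same name in place,
    // or appends the handler when no match is found.
    sess.Handlers.Build.SetBackNamed(request.NamedHandler{
        Name: "myapp.CustomHeader", // hypothetical handler name
        Fn: func(r *request.Request) {
            r.HTTPRequest.Header.Set("X-Myapp-Trace", "1") // hypothetical header
        },
    })
}
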
+
// Run executes all handlers in the list with a given request object.
func (l *HandlerList) Run(r *Request) {
for i, h := range l.list {
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
index 1f131df..911c058 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -21,7 +21,10 @@ const (
// during protocol unmarshaling.
ErrCodeSerialization = "SerializationError"
- // ErrCodeResponseTimeout is the connection timeout error that is recieved
+ // ErrCodeRead is an error that is returned during HTTP reads.
+ ErrCodeRead = "ReadError"
+
+ // ErrCodeResponseTimeout is the connection timeout error that is received
// during body reads.
ErrCodeResponseTimeout = "ResponseTimeout"
@@ -38,23 +41,24 @@ type Request struct {
Handlers Handlers
Retryer
- Time time.Time
- ExpireTime time.Duration
- Operation *Operation
- HTTPRequest *http.Request
- HTTPResponse *http.Response
- Body io.ReadSeeker
- BodyStart int64 // offset from beginning of Body that the request body starts
- Params interface{}
- Error error
- Data interface{}
- RequestID string
- RetryCount int
- Retryable *bool
- RetryDelay time.Duration
- NotHoist bool
- SignedHeaderVals http.Header
- LastSignedAt time.Time
+ Time time.Time
+ ExpireTime time.Duration
+ Operation *Operation
+ HTTPRequest *http.Request
+ HTTPResponse *http.Response
+ Body io.ReadSeeker
+ BodyStart int64 // offset from beginning of Body that the request body starts
+ Params interface{}
+ Error error
+ Data interface{}
+ RequestID string
+ RetryCount int
+ Retryable *bool
+ RetryDelay time.Duration
+ NotHoist bool
+ SignedHeaderVals http.Header
+ LastSignedAt time.Time
+ DisableFollowRedirects bool
context aws.Context
@@ -265,11 +269,17 @@ func (r *Request) Presign(expireTime time.Duration) (string, error) {
return r.HTTPRequest.URL.String(), nil
}
-// PresignRequest behaves just like presign, but hoists all headers and signs them.
-// Also returns the signed hash back to the user
+// PresignRequest behaves just like Presign, with the addition of returning a
+// set of headers that were signed.
+//
+// Returns the URL string for the API operation with signature in the query string,
+// and the HTTP headers that were included in the signature. These headers must
+// be included in any HTTP request made with the presigned URL.
+//
+// To prevent hoisting any headers to the query string set NotHoist to true on
+// this Request value prior to calling PresignRequest.
func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
r.ExpireTime = expireTime
- r.NotHoist = true
r.Sign()
if r.Error != nil {
return "", nil, r.Error
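
Since PresignRequest no longer forces NotHoist, a caller sketch looks like this (bucket and key are placeholders; the returned headers must accompany any request made with the presigned URL):

package main

import (
    "fmt"
    "time"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := s3.New(sess)

    req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
        Bucket: aws.String("example-bucket"), // placeholder
        Key:    aws.String("example-key"),    // placeholder
    })
    // Opt in explicitly if headers should stay out of the query string.
    req.NotHoist = false

    url, signedHeaders, err := req.PresignRequest(15 * time.Minute)
    if err != nil {
        fmt.Println("presign failed:", err)
        return
    }
    fmt.Println(url)
    fmt.Println("send these headers with the request:", signedHeaders)
}
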
@@ -334,10 +344,7 @@ func (r *Request) Sign() error {
return r.Error
}
-// ResetBody rewinds the request body backto its starting position, and
-// set's the HTTP Request body reference. When the body is read prior
-// to being sent in the HTTP request it will need to be rewound.
-func (r *Request) ResetBody() {
+func (r *Request) getNextRequestBody() (io.ReadCloser, error) {
if r.safeBody != nil {
r.safeBody.Close()
}
@@ -359,14 +366,14 @@ func (r *Request) ResetBody() {
// Related golang/go#18257
l, err := computeBodyLength(r.Body)
if err != nil {
- r.Error = awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
- return
+ return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
}
+ var body io.ReadCloser
if l == 0 {
- r.HTTPRequest.Body = noBodyReader
+ body = NoBody
} else if l > 0 {
- r.HTTPRequest.Body = r.safeBody
+ body = r.safeBody
} else {
// Hack to prevent sending bodies for methods where the body
// should be ignored by the server. Sending bodies on these
@@ -378,11 +385,13 @@ func (r *Request) ResetBody() {
// a io.Reader that was not also an io.Seeker.
switch r.Operation.HTTPMethod {
case "GET", "HEAD", "DELETE":
- r.HTTPRequest.Body = noBodyReader
+ body = NoBody
default:
- r.HTTPRequest.Body = r.safeBody
+ body = r.safeBody
}
}
+
+ return body, nil
}
// Attempts to compute the length of the body of the reader using the
@@ -484,7 +493,7 @@ func (r *Request) Send() error {
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
- debugLogReqError(r, "Send Request", false, r.Error)
+ debugLogReqError(r, "Send Request", false, err)
return r.Error
}
debugLogReqError(r, "Send Request", true, err)
@@ -493,12 +502,13 @@ func (r *Request) Send() error {
r.Handlers.UnmarshalMeta.Run(r)
r.Handlers.ValidateResponse.Run(r)
if r.Error != nil {
- err := r.Error
r.Handlers.UnmarshalError.Run(r)
+ err := r.Error
+
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
- debugLogReqError(r, "Validate Response", false, r.Error)
+ debugLogReqError(r, "Validate Response", false, err)
return r.Error
}
debugLogReqError(r, "Validate Response", true, err)
@@ -511,7 +521,7 @@ func (r *Request) Send() error {
r.Handlers.Retry.Run(r)
r.Handlers.AfterRetry.Run(r)
if r.Error != nil {
- debugLogReqError(r, "Unmarshal Response", false, r.Error)
+ debugLogReqError(r, "Unmarshal Response", false, err)
return r.Error
}
debugLogReqError(r, "Unmarshal Response", true, err)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
index 1323af9..869b97a 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go
@@ -16,6 +16,24 @@ func (noBody) Read([]byte) (int, error) { return 0, io.EOF }
func (noBody) Close() error { return nil }
func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil }
-// Is an empty reader that will trigger the Go HTTP client to not include
+// NoBody is an empty reader that will trigger the Go HTTP client to not include
// a body in the HTTP request.
-var noBodyReader = noBody{}
+var NoBody = noBody{}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = err
+ return
+ }
+
+ r.HTTPRequest.Body = body
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
index 8b963f4..c32fc69 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -2,8 +2,32 @@
package request
-import "net/http"
+import (
+ "net/http"
+)
-// Is a http.NoBody reader instructing Go HTTP client to not include
+// NoBody is an http.NoBody reader instructing the Go HTTP client to not include
// a body in the HTTP request.
-var noBodyReader = http.NoBody
+var NoBody = http.NoBody
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+//
+// Will also set Go 1.8's http.Request.GetBody member to allow retrying
+// PUT/POST redirects.
+func (r *Request) ResetBody() {
+ body, err := r.getNextRequestBody()
+ if err != nil {
+ r.Error = err
+ return
+ }
+
+ r.HTTPRequest.Body = body
+ r.HTTPRequest.GetBody = r.getNextRequestBody
+}
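
The SDK's build handler calls ResetBody itself; when driving a *request.Request by hand the body setters are enough, since they reset the body (and, on Go 1.8+, wire up HTTPRequest.GetBody). A sketch with placeholder bucket and key:

package main

import (
    "bytes"
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/s3"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := s3.New(sess)

    req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
        Bucket: aws.String("example-bucket"), // placeholder
        Key:    aws.String("example-key"),    // placeholder
    })

    // SetReaderBody stores the seekable body and calls ResetBody internally.
    req.SetReaderBody(bytes.NewReader([]byte("payload")))

    if err := req.Send(); err != nil {
        fmt.Println("send failed:", err)
    }
}
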
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
index 632cd70..f35fef2 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -1,9 +1,6 @@
package request
import (
- "net"
- "os"
- "syscall"
"time"
"github.com/aws/aws-sdk-go/aws"
@@ -11,7 +8,7 @@ import (
)
// Retryer is an interface to control retry logic for a given service.
-// The default implementation used by most services is the service.DefaultRetryer
+// The default implementation used by most services is the client.DefaultRetryer
// structure, which contains basic retry logic using exponential backoff.
type Retryer interface {
RetryRules(*Request) time.Duration
@@ -41,7 +38,6 @@ var throttleCodes = map[string]struct{}{
"ThrottlingException": {},
"RequestLimitExceeded": {},
"RequestThrottled": {},
- "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once
"TooManyRequestsException": {}, // Lambda functions
"PriorRequestNotComplete": {}, // Route53
}
@@ -73,7 +69,25 @@ func isCodeExpiredCreds(code string) bool {
return ok
}
-func isSerializationErrorRetryable(err error) bool {
+var validParentCodes = map[string]struct{}{
+ ErrCodeSerialization: {},
+ ErrCodeRead: {},
+}
+
+type temporaryError interface {
+ Temporary() bool
+}
+
+func isNestedErrorRetryable(parentErr awserr.Error) bool {
+ if parentErr == nil {
+ return false
+ }
+
+ if _, ok := validParentCodes[parentErr.Code()]; !ok {
+ return false
+ }
+
+ err := parentErr.OrigErr()
if err == nil {
return false
}
@@ -82,46 +96,66 @@ func isSerializationErrorRetryable(err error) bool {
return isCodeRetryable(aerr.Code())
}
- if opErr, ok := err.(*net.OpError); ok {
- if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
- return sysErr.Err == syscall.ECONNRESET
- }
+ if t, ok := err.(temporaryError); ok {
+ return t.Temporary()
}
- return false
+ return isErrConnectionReset(err)
}
// IsErrorRetryable returns whether the error is retryable, based on its Code.
-// Returns false if the request has no Error set.
-func (r *Request) IsErrorRetryable() bool {
- if r.Error != nil {
- if err, ok := r.Error.(awserr.Error); ok && err.Code() != ErrCodeSerialization {
- return isCodeRetryable(err.Code())
- } else if ok {
- return isSerializationErrorRetryable(err.OrigErr())
+// Returns false if error is nil.
+func IsErrorRetryable(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr)
}
}
return false
}
// IsErrorThrottle returns whether the error is to be throttled based on its code.
-// Returns false if the request has no Error set
-func (r *Request) IsErrorThrottle() bool {
- if r.Error != nil {
- if err, ok := r.Error.(awserr.Error); ok {
- return isCodeThrottle(err.Code())
+// Returns false if error is nil.
+func IsErrorThrottle(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeThrottle(aerr.Code())
}
}
return false
}
-// IsErrorExpired returns whether the error code is a credential expiry error.
-// Returns false if the request has no Error set.
-func (r *Request) IsErrorExpired() bool {
- if r.Error != nil {
- if err, ok := r.Error.(awserr.Error); ok {
- return isCodeExpiredCreds(err.Code())
+// IsErrorExpiredCreds returns whether the error code is a credential expiry error.
+// Returns false if error is nil.
+func IsErrorExpiredCreds(err error) bool {
+ if err != nil {
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeExpiredCreds(aerr.Code())
}
}
return false
}
+
+// IsErrorRetryable returns whether the error is retryable, based on its Code.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorRetryable
+func (r *Request) IsErrorRetryable() bool {
+ return IsErrorRetryable(r.Error)
+}
+
+// IsErrorThrottle returns whether the error is to be throttled based on its code.
+// Returns false if the request has no Error set
+//
+// Alias for the utility function IsErrorThrottle
+func (r *Request) IsErrorThrottle() bool {
+ return IsErrorThrottle(r.Error)
+}
+
+// IsErrorExpired returns whether the error code is a credential expiry error.
+// Returns false if the request has no Error set.
+//
+// Alias for the utility function IsErrorExpiredCreds
+func (r *Request) IsErrorExpired() bool {
+ return IsErrorExpiredCreds(r.Error)
+}
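
The retry predicates are now package-level functions that take an error, with the Request methods kept as thin aliases. A small sketch (the code strings rely on the SDK's built-in code tables at this version):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/awserr"
    "github.com/aws/aws-sdk-go/aws/request"
)

func main() {
    // "ThrottlingException" is listed in the throttle code table above.
    fmt.Println("throttle:", request.IsErrorThrottle(
        awserr.New("ThrottlingException", "rate exceeded", nil)))

    // "RequestTimeout" is expected to be in the retryable code table.
    fmt.Println("retryable:", request.IsErrorRetryable(
        awserr.New("RequestTimeout", "read timed out", nil)))

    // Credential expiry checks moved to IsErrorExpiredCreds.
    fmt.Println("expired:", request.IsErrorExpiredCreds(
        awserr.New("ExpiredToken", "token expired", nil)))
}
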
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
index 2520286..4012462 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go
@@ -220,7 +220,7 @@ type ErrParamMinLen struct {
func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
return &ErrParamMinLen{
errInvalidParam: errInvalidParam{
- code: ParamMinValueErrCode,
+ code: ParamMinLenErrCode,
field: field,
msg: fmt.Sprintf("minimum field size of %v", min),
},
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
index 354c381..4601f88 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -66,8 +66,8 @@ func WithWaiterRequestOptions(opts ...Option) WaiterOption {
}
}
-// A Waiter provides the functionality to performing blocking call which will
-// wait for an resource state to be satisfied a service.
+// A Waiter provides the functionality to perform a blocking call which will
+// wait for a resource state to be satisfied by a service.
//
// This type should not be used directly. The API operations provided in the
// service packages prefixed with "WaitUntil" should be used instead.
@@ -79,8 +79,9 @@ type Waiter struct {
MaxAttempts int
Delay WaiterDelay
- RequestOptions []Option
- NewRequest func([]Option) (*Request, error)
+ RequestOptions []Option
+ NewRequest func([]Option) (*Request, error)
+ SleepWithContext func(aws.Context, time.Duration) error
}
// ApplyOptions updates the waiter with the list of waiter options provided.
@@ -178,14 +179,8 @@ func (w Waiter) WaitWithContext(ctx aws.Context) error {
// See if any of the acceptors match the request's response, or error
for _, a := range w.Acceptors {
- var matched bool
- matched, err = a.match(w.Name, w.Logger, req, err)
- if err != nil {
- // Error occurred during current waiter call
- return err
- } else if matched {
- // Match was found can stop here and return
- return nil
+ if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched {
+ return matchErr
}
}
@@ -201,8 +196,15 @@ func (w Waiter) WaitWithContext(ctx aws.Context) error {
if sleepFn := req.Config.SleepDelay; sleepFn != nil {
// Support SleepDelay for backwards compatibility and testing
sleepFn(delay)
- } else if err := aws.SleepWithContext(ctx, delay); err != nil {
- return awserr.New(CanceledErrorCode, "waiter context canceled", err)
+ } else {
+ sleepCtxFn := w.SleepWithContext
+ if sleepCtxFn == nil {
+ sleepCtxFn = aws.SleepWithContext
+ }
+
+ if err := sleepCtxFn(ctx, delay); err != nil {
+ return awserr.New(CanceledErrorCode, "waiter context canceled", err)
+ }
}
}
@@ -274,7 +276,7 @@ func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err erro
return true, nil
case FailureWaiterState:
// Waiter failure state triggered
- return false, awserr.New("ResourceNotReady",
+ return true, awserr.New(WaiterResourceNotReadyErrorCode,
"failed waiting for successful resource state", err)
case RetryWaiterState:
// clear the error and retry the operation
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
index 2fe35e7..ea7b886 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -124,9 +124,8 @@ file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
files have the same format.
If both config files are present the configuration from both files will be
-read. The Session will be created from configuration values from the shared
-credentials file (~/.aws/credentials) over those in the shared credentials
-file (~/.aws/config).
+read. The Session will be created from configuration values, with values from
+the shared credentials file (~/.aws/credentials) taking precedence over those
+in the shared config file (~/.aws/config).
Credentials are the values the SDK should use for authenticating requests with
AWS Services. Credentials from a configuration file will need to include both
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index e6278a7..f1adcf4 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -2,12 +2,14 @@ package session
import (
"os"
- "path/filepath"
"strconv"
"github.com/aws/aws-sdk-go/aws/credentials"
)
+// EnvProviderName is the provider name reported for credentials loaded from the environment.
+const EnvProviderName = "EnvConfigCredentials"
+
// envConfig is a collection of environment values the SDK will read
// setup config from. All environment values are optional. But some values
// such as credentials require multiple values to be complete or the values
@@ -77,7 +79,7 @@ type envConfig struct {
SharedConfigFile string
// Sets the path to a custom Certificate Authority (CA) Bundle PEM file
- // that the SDK will use instead of the the system's root CA bundle.
+ // that the SDK will use instead of the system's root CA bundle.
// Only use this if you want to configure the SDK to use a custom set
// of CAs.
//
@@ -116,6 +118,12 @@ var (
"AWS_PROFILE",
"AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set
}
+ sharedCredsFileEnvKey = []string{
+ "AWS_SHARED_CREDENTIALS_FILE",
+ }
+ sharedConfigFileEnvKey = []string{
+ "AWS_CONFIG_FILE",
+ }
)
// loadEnvConfig retrieves the SDK's environment configuration.
@@ -152,7 +160,7 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 {
cfg.Creds = credentials.Value{}
} else {
- cfg.Creds.ProviderName = "EnvConfigCredentials"
+ cfg.Creds.ProviderName = EnvProviderName
}
regionKeys := regionEnvKeys
@@ -165,8 +173,8 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
setFromEnvVal(&cfg.Region, regionKeys)
setFromEnvVal(&cfg.Profile, profileKeys)
- cfg.SharedCredentialsFile = sharedCredentialsFilename()
- cfg.SharedConfigFile = sharedConfigFilename()
+ setFromEnvVal(&cfg.SharedCredentialsFile, sharedCredsFileEnvKey)
+ setFromEnvVal(&cfg.SharedConfigFile, sharedConfigFileEnvKey)
cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
@@ -181,28 +189,3 @@ func setFromEnvVal(dst *string, keys []string) {
}
}
}
-
-func sharedCredentialsFilename() string {
- if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 {
- return name
- }
-
- return filepath.Join(userHomeDir(), ".aws", "credentials")
-}
-
-func sharedConfigFilename() string {
- if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 {
- return name
- }
-
- return filepath.Join(userHomeDir(), ".aws", "config")
-}
-
-func userHomeDir() string {
- homeDir := os.Getenv("HOME") // *nix
- if len(homeDir) == 0 { // windows
- homeDir = os.Getenv("USERPROFILE")
- }
-
- return homeDir
-}
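
With the home-directory fallback removed from envConfig, these variables are read as-is and the platform defaults are applied later by the session (see session.go below). A sketch that reports which provider supplied the credentials; all paths and keys are placeholders:

package main

import (
    "fmt"
    "os"

    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    os.Setenv("AWS_SHARED_CREDENTIALS_FILE", "/tmp/alt-credentials") // placeholder
    os.Setenv("AWS_CONFIG_FILE", "/tmp/alt-config")                  // placeholder
    os.Setenv("AWS_ACCESS_KEY_ID", "AKID")                           // placeholder
    os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET")                     // placeholder

    sess, err := session.NewSession()
    if err != nil {
        fmt.Println("session error:", err)
        return
    }

    creds, err := sess.Config.Credentials.Get()
    if err != nil {
        fmt.Println("credentials error:", err)
        return
    }
    // Expected to print session.EnvProviderName ("EnvConfigCredentials") when
    // the environment credentials above were picked up.
    fmt.Println("provider:", creds.ProviderName)
}
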
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index 96c740d..9f75d5a 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -155,6 +155,10 @@ type Options struct {
// and enable or disable the shared config functionality.
SharedConfigState SharedConfigState
+ // Ordered list of files the session will load configuration from.
+	// It overrides the AWS_SHARED_CREDENTIALS_FILE and AWS_CONFIG_FILE environment variables.
+ SharedConfigFiles []string
+
// When the SDK's shared config is configured to assume a role with MFA
// this option is required in order to provide the mechanism that will
// retrieve the MFA token. There is no default value for this field. If
@@ -218,7 +222,7 @@ type Options struct {
//
// // Force enable Shared Config support
// sess := session.Must(session.NewSessionWithOptions(session.Options{
-// SharedConfigState: SharedConfigEnable,
+// SharedConfigState: session.SharedConfigEnable,
// }))
func NewSessionWithOptions(opts Options) (*Session, error) {
var envCfg envConfig
@@ -239,6 +243,13 @@ func NewSessionWithOptions(opts Options) (*Session, error) {
envCfg.EnableSharedConfig = true
}
+ if len(envCfg.SharedCredentialsFile) == 0 {
+ envCfg.SharedCredentialsFile = defaults.SharedCredentialsFilename()
+ }
+ if len(envCfg.SharedConfigFile) == 0 {
+ envCfg.SharedConfigFile = defaults.SharedConfigFilename()
+ }
+
// Only use AWS_CA_BUNDLE if session option is not provided.
if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
f, err := os.Open(envCfg.CustomCABundle)
@@ -304,13 +315,18 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
userCfg := &aws.Config{}
userCfg.MergeIn(cfgs...)
- // Order config files will be loaded in with later files overwriting
+	// Config files will be loaded in order, with later files overwriting
// previous config file values.
- cfgFiles := []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
- if !envCfg.EnableSharedConfig {
- // The shared config file (~/.aws/config) is only loaded if instructed
- // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
- cfgFiles = cfgFiles[1:]
+ var cfgFiles []string
+ if opts.SharedConfigFiles != nil {
+ cfgFiles = opts.SharedConfigFiles
+ } else {
+ cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile}
+ if !envCfg.EnableSharedConfig {
+ // The shared config file (~/.aws/config) is only loaded if instructed
+ // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG).
+ cfgFiles = cfgFiles[1:]
+ }
}
// Load additional config from file(s)
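
A sketch of the new Options.SharedConfigFiles field; the explicit list takes the place of the env/default file list assembled above (paths and profile name are placeholders):

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws/session"
)

func main() {
    sess, err := session.NewSessionWithOptions(session.Options{
        SharedConfigState: session.SharedConfigEnable,
        Profile:           "myapp", // placeholder profile
        // Later files overwrite earlier ones when keys collide.
        SharedConfigFiles: []string{
            "/etc/myapp/aws_config",      // placeholder
            "/etc/myapp/aws_credentials", // placeholder
        },
    })
    if err != nil {
        fmt.Println("session error:", err)
        return
    }
    _ = sess
}
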
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
index b58076f..09c8e5b 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -113,7 +113,7 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
f, err := ini.Load(b)
if err != nil {
- return nil, SharedConfigLoadError{Filename: filename}
+ return nil, SharedConfigLoadError{Filename: filename, Err: err}
}
files = append(files, sharedConfigFile{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
index 434ac87..15da572 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -45,7 +45,7 @@
// If signing a request intended for HTTP2 server, and you're using Go 1.6.2
// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the
// request URL. https://github.com/golang/go/issues/16847 points to a bug in
-// Go pre 1.8 that failes to make HTTP2 requests using absolute URL in the HTTP
+// Go pre 1.8 that fails to make HTTP2 requests using absolute URL in the HTTP
// message. URL.Opaque generally will force Go to make requests with absolute URL.
// URL.RawPath does not do this, but RawPath must be a valid escaping of Path
// or url.EscapedPath will ignore the RawPath escaping.
@@ -55,7 +55,6 @@
package v4
import (
- "bytes"
"crypto/hmac"
"crypto/sha256"
"encoding/hex"
@@ -402,7 +401,7 @@ var SignRequestHandler = request.NamedHandler{
}
// SignSDKRequest signs an AWS request with the V4 signature. This
-// request handler is bested used only with the SDK's built in service client's
+// request handler should only be used with the SDK's built-in service clients'
// API operation requests.
//
// This function should not be used on its own, but in conjunction with
@@ -503,6 +502,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) {
ctx.buildTime() // no depends
ctx.buildCredentialString() // no depends
+ ctx.buildBodyDigest()
+
unsignedHeaders := ctx.Request.Header
if ctx.isPresign {
if !disableHeaderHoisting {
@@ -514,7 +515,6 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) {
}
}
- ctx.buildBodyDigest()
ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
ctx.buildCanonicalString() // depends on canon headers / signed headers
ctx.buildStringToSign() // depends on canon string
@@ -604,14 +604,18 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
headerValues := make([]string, len(headers))
for i, k := range headers {
if k == "host" {
- headerValues[i] = "host:" + ctx.Request.URL.Host
+ if ctx.Request.Host != "" {
+ headerValues[i] = "host:" + ctx.Request.Host
+ } else {
+ headerValues[i] = "host:" + ctx.Request.URL.Host
+ }
} else {
headerValues[i] = k + ":" +
strings.Join(ctx.SignedHeaderVals[k], ",")
}
}
-
- ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n")
+ stripExcessSpaces(headerValues)
+ ctx.canonicalHeaders = strings.Join(headerValues, "\n")
}
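
A hedged sketch of what the Request.Host change enables when using the v4 signer directly: the canonical host header can now follow an explicitly set Host rather than the URL's host (credentials, service, and region below are placeholders):

package main

import (
    "net/http"
    "strings"
    "time"

    "github.com/aws/aws-sdk-go/aws/credentials"
    v4 "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
    creds := credentials.NewStaticCredentials("AKID", "SECRET", "") // placeholder creds
    signer := v4.NewSigner(creds)

    req, _ := http.NewRequest("GET", "https://localhost:8443/", nil) // e.g. a local proxy
    // The signed "host" header now uses Request.Host when it is set.
    req.Host = "sqs.us-east-1.amazonaws.com"

    if _, err := signer.Sign(req, strings.NewReader(""), "sqs", "us-east-1", time.Now()); err != nil {
        panic(err)
    }
}
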
func (ctx *signingCtx) buildCanonicalString() {
@@ -713,49 +717,46 @@ func makeSha256Reader(reader io.ReadSeeker) []byte {
return hash.Sum(nil)
}
-const doubleSpaces = " "
+const doubleSpace = " "
-var doubleSpaceBytes = []byte(doubleSpaces)
-
-func stripExcessSpaces(headerVals []string) []string {
- vals := make([]string, len(headerVals))
- for i, str := range headerVals {
- // Trim leading and trailing spaces
- trimmed := strings.TrimSpace(str)
+// stripExcessSpaces will rewrite the passed in slice's string values to not
+// contain multiple side-by-side spaces.
+func stripExcessSpaces(vals []string) {
+ var j, k, l, m, spaces int
+ for i, str := range vals {
+ // Trim trailing spaces
+ for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
+ }
- idx := strings.Index(trimmed, doubleSpaces)
- var buf []byte
- for idx > -1 {
- // Multiple adjacent spaces found
- if buf == nil {
- // first time create the buffer
- buf = []byte(trimmed)
- }
+ // Trim leading spaces
+ for k = 0; k < j && str[k] == ' '; k++ {
+ }
+ str = str[k : j+1]
- stripToIdx := -1
- for j := idx + 1; j < len(buf); j++ {
- if buf[j] != ' ' {
- buf = append(buf[:idx+1], buf[j:]...)
- stripToIdx = j
- break
- }
- }
+ // Strip multiple spaces.
+ j = strings.Index(str, doubleSpace)
+ if j < 0 {
+ vals[i] = str
+ continue
+ }
- if stripToIdx >= 0 {
- idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes)
- if idx >= 0 {
- idx += stripToIdx
+ buf := []byte(str)
+ for k, m, l = j, j, len(buf); k < l; k++ {
+ if buf[k] == ' ' {
+ if spaces == 0 {
+ // First space.
+ buf[m] = buf[k]
+ m++
}
+ spaces++
} else {
- idx = -1
+ // End of multiple spaces.
+ spaces = 0
+ buf[m] = buf[k]
+ m++
}
}
- if buf != nil {
- vals[i] = string(buf)
- } else {
- vals[i] = trimmed
- }
+ vals[i] = string(buf[:m])
}
- return vals
}
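
For reference only, a standalone sketch (not the SDK's unexported helper) of the normalization rule stripExcessSpaces applies to canonical header values:

package main

import (
    "fmt"
    "strings"
)

// collapseSpaces trims the value and collapses interior whitespace runs to a
// single space. strings.Fields splits on any whitespace, so this is slightly
// broader than the SDK's space-only, in-place loop, but the result for header
// values made of words and spaces is the same.
func collapseSpaces(s string) string {
    return strings.Join(strings.Fields(s), " ")
}

func main() {
    fmt.Printf("%q\n", collapseSpaces("  foo   bar  baz ")) // "foo bar baz"
}
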
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go
new file mode 100644
index 0000000..6192b24
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go
@@ -0,0 +1,12 @@
+// +build go1.8
+
+package aws
+
+import "net/url"
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Wrapper of net/url#URL.Hostname for backwards Go version compatibility.
+func URLHostname(url *url.URL) string {
+ return url.Hostname()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
new file mode 100644
index 0000000..0210d27
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go
@@ -0,0 +1,29 @@
+// +build !go1.8
+
+package aws
+
+import (
+ "net/url"
+ "strings"
+)
+
+// URLHostname will extract the Hostname without port from the URL value.
+//
+// Copy of Go 1.8's net/url#URL.Hostname functionality.
+func URLHostname(url *url.URL) string {
+ return stripPort(url.Host)
+
+}
+
+// stripPort is a copy of Go 1.8's url#URL.Hostname functionality.
+// https://golang.org/src/net/url/url.go
+func stripPort(hostport string) string {
+ colon := strings.IndexByte(hostport, ':')
+ if colon == -1 {
+ return hostport
+ }
+ if i := strings.IndexByte(hostport, ']'); i != -1 {
+ return strings.TrimPrefix(hostport[:i], "[")
+ }
+ return hostport[:colon]
+}
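Together, url.go and url_1_7.go give the SDK one URLHostname helper across Go versions: on go1.8+ it defers to url.URL.Hostname, while the fallback above strips a trailing ":port" and unwraps bracketed IPv6 literals. A small standalone sketch of the fallback behaviour (stripPortSketch is an illustrative name, not an SDK function):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// stripPortSketch mirrors the pre-Go 1.8 fallback above: drop a trailing
// ":port" and unwrap a bracketed IPv6 literal.
func stripPortSketch(hostport string) string {
	colon := strings.IndexByte(hostport, ':')
	if colon == -1 {
		return hostport
	}
	if i := strings.IndexByte(hostport, ']'); i != -1 {
		return strings.TrimPrefix(hostport[:i], "[")
	}
	return hostport[:colon]
}

func main() {
	u, _ := url.Parse("https://[2001:db8::1]:443/path")
	fmt.Println(stripPortSketch(u.Host)) // 2001:db8::1

	u, _ = url.Parse("https://s3.amazonaws.com:443/bucket")
	fmt.Println(stripPortSketch(u.Host)) // s3.amazonaws.com
}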
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index d1b587d..aceb0c9 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.8.11"
+const SDKVersion = "1.12.12"
diff --git a/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
new file mode 100644
index 0000000..ebcbc2b
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/internal/shareddefaults/shared_config.go
@@ -0,0 +1,40 @@
+package shareddefaults
+
+import (
+ "os"
+ "path/filepath"
+ "runtime"
+)
+
+// SharedCredentialsFilename returns the SDK's default file path
+// for the shared credentials file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/credentials
+// - Windows: %USERPROFILE%\.aws\credentials
+func SharedCredentialsFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "credentials")
+}
+
+// SharedConfigFilename returns the SDK's default file path for
+// the shared config file.
+//
+// Builds the shared config file path based on the OS's platform.
+//
+// - Linux/Unix: $HOME/.aws/config
+// - Windows: %USERPROFILE%\.aws\config
+func SharedConfigFilename() string {
+ return filepath.Join(UserHomeDir(), ".aws", "config")
+}
+
+// UserHomeDir returns the home directory for the user the process is
+// running under.
+func UserHomeDir() string {
+ if runtime.GOOS == "windows" { // Windows
+ return os.Getenv("USERPROFILE")
+ }
+
+ // *nix
+ return os.Getenv("HOME")
+}
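The new internal/shareddefaults package centralizes where the shared credentials and config files are looked up, so the credentials providers and session config resolve the same paths. A sketch of the resulting locations, assuming only that HOME or USERPROFILE is set as usual:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// homeDir restates the lookup above for illustration: %USERPROFILE% on
// Windows, $HOME everywhere else.
func homeDir() string {
	if runtime.GOOS == "windows" {
		return os.Getenv("USERPROFILE")
	}
	return os.Getenv("HOME")
}

func main() {
	// The same paths SharedCredentialsFilename / SharedConfigFilename resolve to.
	fmt.Println(filepath.Join(homeDir(), ".aws", "credentials"))
	fmt.Println(filepath.Join(homeDir(), ".aws", "config"))
}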
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
index 524ca95..5ce9cba 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -121,6 +121,10 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string
return nil
}
+ if _, ok := value.Interface().([]byte); ok {
+ return q.parseScalar(v, value, prefix, tag)
+ }
+
// check for unflattened list member
if !q.isEC2 && tag.Get("flattened") == "" {
if listName := tag.Get("locationNameList"); listName == "" {
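The added check short-circuits []byte values: although []byte is a slice, the query serializer hands it to the scalar path (where blob values are base64-encoded) instead of walking it element by element. A minimal sketch of that reflection check (isByteSlice is an illustrative name, not an SDK function):

package main

import (
	"fmt"
	"reflect"
)

// isByteSlice mirrors the type assertion added above: a []byte field is
// treated as one scalar value rather than expanded like other lists.
func isByteSlice(v reflect.Value) bool {
	_, ok := v.Interface().([]byte)
	return ok
}

func main() {
	fmt.Println(isByteSlice(reflect.ValueOf([]byte("payload"))))  // true
	fmt.Println(isByteSlice(reflect.ValueOf([]string{"a", "b"}))) // false
}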
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
index c74c191..7091b45 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go
@@ -131,7 +131,6 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl
continue
}
-
mTag := field.Tag
if mTag.Get("location") != "" { // skip non-body members
continue
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
index 64b6ddd..8758462 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go
@@ -15,7 +15,10 @@ import (
// needs to match the shape of the XML expected to be decoded.
// If the shape doesn't match unmarshaling will fail.
func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
- n, _ := XMLToStruct(d, nil)
+ n, err := XMLToStruct(d, nil)
+ if err != nil {
+ return err
+ }
if n.Children != nil {
for _, root := range n.Children {
for _, c := range root {
@@ -23,7 +26,7 @@ func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error {
c = wrappedChild[0] // pull out wrapped element
}
- err := parse(reflect.ValueOf(v), c, "")
+ err = parse(reflect.ValueOf(v), c, "")
if err != nil {
if err == io.EOF {
return nil
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
index 3112512..3e970b6 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go
@@ -40,11 +40,16 @@ func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) {
out := &XMLNode{}
for {
tok, err := d.Token()
- if tok == nil || err == io.EOF {
- break
- }
if err != nil {
- return out, err
+ if err == io.EOF {
+ break
+ } else {
+ return out, err
+ }
+ }
+
+ if tok == nil {
+ break
}
switch typed := tok.(type) {
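The restructured loop separates the conditions that previously shared one branch: io.EOF simply ends the token walk, any other decoder error is returned (and now propagated by UnmarshalXML above), and a nil token still terminates the loop. The same pattern in a self-contained form:

package main

import (
	"encoding/xml"
	"fmt"
	"io"
	"strings"
)

// countElements reads tokens the way the reworked loop above does: io.EOF
// ends the walk cleanly, any other decoder error is surfaced to the caller,
// and a nil token (a defensive guard the SDK keeps) also stops the loop.
func countElements(r io.Reader) (int, error) {
	d := xml.NewDecoder(r)
	n := 0
	for {
		tok, err := d.Token()
		if err != nil {
			if err == io.EOF {
				break // normal end of document
			}
			return n, err // real decode error, surface it
		}
		if tok == nil {
			break
		}
		if _, ok := tok.(xml.StartElement); ok {
			n++
		}
	}
	return n, nil
}

func main() {
	n, err := countElements(strings.NewReader("<a><b/><c/></a>"))
	fmt.Println(n, err) // 3 <nil>

	_, err = countElements(strings.NewReader("<a><b></a>")) // mismatched tag
	fmt.Println(err != nil)                                 // true
}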
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
index 3f0fc2f..1d4fa38 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -1,6 +1,5 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-// Package s3 provides a client for Amazon Simple Storage Service.
package s3
import (
@@ -19,19 +18,18 @@ const opAbortMultipartUpload = "AbortMultipartUpload"
// AbortMultipartUploadRequest generates a "aws/request.Request" representing the
// client's request for the AbortMultipartUpload operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See AbortMultipartUpload for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the AbortMultipartUpload method directly
-// instead.
+// See AbortMultipartUpload for more information on using the AbortMultipartUpload
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the AbortMultipartUploadRequest method.
// req, resp := client.AbortMultipartUploadRequest(params)
@@ -103,19 +101,18 @@ const opCompleteMultipartUpload = "CompleteMultipartUpload"
// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the
// client's request for the CompleteMultipartUpload operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See CompleteMultipartUpload for usage and error information.
+// See CompleteMultipartUpload for more information on using the CompleteMultipartUpload
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the CompleteMultipartUpload method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the CompleteMultipartUploadRequest method.
// req, resp := client.CompleteMultipartUploadRequest(params)
@@ -178,19 +175,18 @@ const opCopyObject = "CopyObject"
// CopyObjectRequest generates a "aws/request.Request" representing the
// client's request for the CopyObject operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See CopyObject for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the CopyObject method directly
-// instead.
+// See CopyObject for more information on using the CopyObject
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the CopyObjectRequest method.
// req, resp := client.CopyObjectRequest(params)
@@ -259,19 +255,18 @@ const opCreateBucket = "CreateBucket"
// CreateBucketRequest generates a "aws/request.Request" representing the
// client's request for the CreateBucket operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See CreateBucket for usage and error information.
+// See CreateBucket for more information on using the CreateBucket
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the CreateBucket method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the CreateBucketRequest method.
// req, resp := client.CreateBucketRequest(params)
@@ -342,19 +337,18 @@ const opCreateMultipartUpload = "CreateMultipartUpload"
// CreateMultipartUploadRequest generates a "aws/request.Request" representing the
// client's request for the CreateMultipartUpload operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See CreateMultipartUpload for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the CreateMultipartUpload method directly
-// instead.
+// See CreateMultipartUpload for more information on using the CreateMultipartUpload
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the CreateMultipartUploadRequest method.
// req, resp := client.CreateMultipartUploadRequest(params)
@@ -423,19 +417,18 @@ const opDeleteBucket = "DeleteBucket"
// DeleteBucketRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucket operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See DeleteBucket for usage and error information.
+// See DeleteBucket for more information on using the DeleteBucket
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucket method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the DeleteBucketRequest method.
// req, resp := client.DeleteBucketRequest(params)
@@ -501,19 +494,18 @@ const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration
// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See DeleteBucketAnalyticsConfiguration for usage and error information.
+// See DeleteBucketAnalyticsConfiguration for more information on using the DeleteBucketAnalyticsConfiguration
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketAnalyticsConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method.
// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params)
@@ -579,19 +571,18 @@ const opDeleteBucketCors = "DeleteBucketCors"
// DeleteBucketCorsRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketCors operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See DeleteBucketCors for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketCors method directly
-// instead.
+// See DeleteBucketCors for more information on using the DeleteBucketCors
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the DeleteBucketCorsRequest method.
// req, resp := client.DeleteBucketCorsRequest(params)
@@ -656,19 +647,18 @@ const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration
// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See DeleteBucketInventoryConfiguration for usage and error information.
+// See DeleteBucketInventoryConfiguration for more information on using the DeleteBucketInventoryConfiguration
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketInventoryConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method.
// req, resp := client.DeleteBucketInventoryConfigurationRequest(params)
@@ -734,19 +724,18 @@ const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketLifecycle operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See DeleteBucketLifecycle for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketLifecycle method directly
-// instead.
+// See DeleteBucketLifecycle for more information on using the DeleteBucketLifecycle
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the DeleteBucketLifecycleRequest method.
// req, resp := client.DeleteBucketLifecycleRequest(params)
@@ -811,19 +800,18 @@ const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"
// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See DeleteBucketMetricsConfiguration for usage and error information.
+// See DeleteBucketMetricsConfiguration for more information on using the DeleteBucketMetricsConfiguration
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketMetricsConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method.
// req, resp := client.DeleteBucketMetricsConfigurationRequest(params)
@@ -889,19 +877,18 @@ const opDeleteBucketPolicy = "DeleteBucketPolicy"
// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketPolicy operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See DeleteBucketPolicy for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketPolicy method directly
-// instead.
+// See DeleteBucketPolicy for more information on using the DeleteBucketPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the DeleteBucketPolicyRequest method.
// req, resp := client.DeleteBucketPolicyRequest(params)
@@ -966,19 +953,18 @@ const opDeleteBucketReplication = "DeleteBucketReplication"
// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketReplication operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See DeleteBucketReplication for usage and error information.
+// See DeleteBucketReplication for more information on using the DeleteBucketReplication
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketReplication method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the DeleteBucketReplicationRequest method.
// req, resp := client.DeleteBucketReplicationRequest(params)
@@ -1043,19 +1029,18 @@ const opDeleteBucketTagging = "DeleteBucketTagging"
// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketTagging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See DeleteBucketTagging for usage and error information.
+// See DeleteBucketTagging for more information on using the DeleteBucketTagging
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketTagging method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the DeleteBucketTaggingRequest method.
// req, resp := client.DeleteBucketTaggingRequest(params)
@@ -1120,19 +1105,18 @@ const opDeleteBucketWebsite = "DeleteBucketWebsite"
// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the
// client's request for the DeleteBucketWebsite operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See DeleteBucketWebsite for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteBucketWebsite method directly
-// instead.
+// See DeleteBucketWebsite for more information on using the DeleteBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the DeleteBucketWebsiteRequest method.
// req, resp := client.DeleteBucketWebsiteRequest(params)
@@ -1197,19 +1181,18 @@ const opDeleteObject = "DeleteObject"
// DeleteObjectRequest generates a "aws/request.Request" representing the
// client's request for the DeleteObject operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See DeleteObject for usage and error information.
+// See DeleteObject for more information on using the DeleteObject
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteObject method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the DeleteObjectRequest method.
// req, resp := client.DeleteObjectRequest(params)
@@ -1274,19 +1257,18 @@ const opDeleteObjectTagging = "DeleteObjectTagging"
// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the
// client's request for the DeleteObjectTagging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See DeleteObjectTagging for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteObjectTagging method directly
-// instead.
+// See DeleteObjectTagging for more information on using the DeleteObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the DeleteObjectTaggingRequest method.
// req, resp := client.DeleteObjectTaggingRequest(params)
@@ -1349,19 +1331,18 @@ const opDeleteObjects = "DeleteObjects"
// DeleteObjectsRequest generates a "aws/request.Request" representing the
// client's request for the DeleteObjects operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See DeleteObjects for usage and error information.
+// See DeleteObjects for more information on using the DeleteObjects
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DeleteObjects method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the DeleteObjectsRequest method.
// req, resp := client.DeleteObjectsRequest(params)
@@ -1425,19 +1406,18 @@ const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"
// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketAccelerateConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See GetBucketAccelerateConfiguration for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketAccelerateConfiguration method directly
-// instead.
+// See GetBucketAccelerateConfiguration for more information on using the GetBucketAccelerateConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketAccelerateConfigurationRequest method.
// req, resp := client.GetBucketAccelerateConfigurationRequest(params)
@@ -1500,19 +1480,18 @@ const opGetBucketAcl = "GetBucketAcl"
// GetBucketAclRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketAcl operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See GetBucketAcl for usage and error information.
+// See GetBucketAcl for more information on using the GetBucketAcl
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketAcl method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketAclRequest method.
// req, resp := client.GetBucketAclRequest(params)
@@ -1575,19 +1554,18 @@ const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration"
// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See GetBucketAnalyticsConfiguration for usage and error information.
+// See GetBucketAnalyticsConfiguration for more information on using the GetBucketAnalyticsConfiguration
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketAnalyticsConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method.
// req, resp := client.GetBucketAnalyticsConfigurationRequest(params)
@@ -1651,19 +1629,18 @@ const opGetBucketCors = "GetBucketCors"
// GetBucketCorsRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketCors operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See GetBucketCors for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketCors method directly
-// instead.
+// See GetBucketCors for more information on using the GetBucketCors
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketCorsRequest method.
// req, resp := client.GetBucketCorsRequest(params)
@@ -1726,19 +1703,18 @@ const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration"
// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketInventoryConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See GetBucketInventoryConfiguration for usage and error information.
+// See GetBucketInventoryConfiguration for more information on using the GetBucketInventoryConfiguration
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketInventoryConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketInventoryConfigurationRequest method.
// req, resp := client.GetBucketInventoryConfigurationRequest(params)
@@ -1802,19 +1778,18 @@ const opGetBucketLifecycle = "GetBucketLifecycle"
// GetBucketLifecycleRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLifecycle operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See GetBucketLifecycle for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketLifecycle method directly
-// instead.
+// See GetBucketLifecycle for more information on using the GetBucketLifecycle
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketLifecycleRequest method.
// req, resp := client.GetBucketLifecycleRequest(params)
@@ -1880,19 +1855,18 @@ const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration"
// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLifecycleConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See GetBucketLifecycleConfiguration for usage and error information.
+// See GetBucketLifecycleConfiguration for more information on using the GetBucketLifecycleConfiguration
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketLifecycleConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketLifecycleConfigurationRequest method.
// req, resp := client.GetBucketLifecycleConfigurationRequest(params)
@@ -1955,19 +1929,18 @@ const opGetBucketLocation = "GetBucketLocation"
// GetBucketLocationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLocation operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See GetBucketLocation for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketLocation method directly
-// instead.
+// See GetBucketLocation for more information on using the GetBucketLocation
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketLocationRequest method.
// req, resp := client.GetBucketLocationRequest(params)
@@ -2030,19 +2003,18 @@ const opGetBucketLogging = "GetBucketLogging"
// GetBucketLoggingRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketLogging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See GetBucketLogging for usage and error information.
+// See GetBucketLogging for more information on using the GetBucketLogging
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketLogging method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketLoggingRequest method.
// req, resp := client.GetBucketLoggingRequest(params)
@@ -2106,19 +2078,18 @@ const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration"
// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketMetricsConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See GetBucketMetricsConfiguration for usage and error information.
+// See GetBucketMetricsConfiguration for more information on using the GetBucketMetricsConfiguration
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketMetricsConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketMetricsConfigurationRequest method.
// req, resp := client.GetBucketMetricsConfigurationRequest(params)
@@ -2182,19 +2153,18 @@ const opGetBucketNotification = "GetBucketNotification"
// GetBucketNotificationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketNotification operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See GetBucketNotification for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketNotification method directly
-// instead.
+// See GetBucketNotification for more information on using the GetBucketNotification
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketNotificationRequest method.
// req, resp := client.GetBucketNotificationRequest(params)
@@ -2260,19 +2230,18 @@ const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration
// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketNotificationConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See GetBucketNotificationConfiguration for usage and error information.
+// See GetBucketNotificationConfiguration for more information on using the GetBucketNotificationConfiguration
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketNotificationConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketNotificationConfigurationRequest method.
// req, resp := client.GetBucketNotificationConfigurationRequest(params)
@@ -2335,19 +2304,18 @@ const opGetBucketPolicy = "GetBucketPolicy"
// GetBucketPolicyRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketPolicy operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See GetBucketPolicy for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketPolicy method directly
-// instead.
+// See GetBucketPolicy for more information on using the GetBucketPolicy
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketPolicyRequest method.
// req, resp := client.GetBucketPolicyRequest(params)
@@ -2410,19 +2378,18 @@ const opGetBucketReplication = "GetBucketReplication"
// GetBucketReplicationRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketReplication operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// See GetBucketReplication for usage and error information.
+// See GetBucketReplication for more information on using the GetBucketReplication
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketReplication method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle. Such as custom headers, or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketReplicationRequest method.
// req, resp := client.GetBucketReplicationRequest(params)
@@ -2485,19 +2452,18 @@ const opGetBucketRequestPayment = "GetBucketRequestPayment"
// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketRequestPayment operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See GetBucketRequestPayment for usage and error information.
+// Use the "Send" method on the returned Request to send the API call to the service.
+// The "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketRequestPayment method directly
-// instead.
+// See GetBucketRequestPayment for more information on using the GetBucketRequestPayment
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketRequestPaymentRequest method.
// req, resp := client.GetBucketRequestPaymentRequest(params)
@@ -2560,19 +2526,18 @@ const opGetBucketTagging = "GetBucketTagging"
// GetBucketTaggingRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketTagging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See GetBucketTagging for usage and error information.
+// See GetBucketTagging for more information on using the GetBucketTagging
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketTagging method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketTaggingRequest method.
// req, resp := client.GetBucketTaggingRequest(params)
@@ -2635,19 +2600,18 @@ const opGetBucketVersioning = "GetBucketVersioning"
// GetBucketVersioningRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketVersioning operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See GetBucketVersioning for usage and error information.
+// See GetBucketVersioning for more information on using the GetBucketVersioning
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketVersioning method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketVersioningRequest method.
// req, resp := client.GetBucketVersioningRequest(params)
@@ -2710,19 +2674,18 @@ const opGetBucketWebsite = "GetBucketWebsite"
// GetBucketWebsiteRequest generates a "aws/request.Request" representing the
// client's request for the GetBucketWebsite operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See GetBucketWebsite for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetBucketWebsite method directly
-// instead.
+// See GetBucketWebsite for more information on using the GetBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetBucketWebsiteRequest method.
// req, resp := client.GetBucketWebsiteRequest(params)
@@ -2785,19 +2748,18 @@ const opGetObject = "GetObject"
// GetObjectRequest generates a "aws/request.Request" representing the
// client's request for the GetObject operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See GetObject for usage and error information.
+// See GetObject for more information on using the GetObject
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetObject method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetObjectRequest method.
// req, resp := client.GetObjectRequest(params)
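One concrete reading of "inject custom logic or configuration ... such as custom headers" is to modify the request's exported HTTPRequest field before calling Send. A minimal sketch, assuming the same client setup and imports as the sketch above; the header name, bucket, and key are hypothetical:

func getObjectWithTraceHeader(client *s3.S3) (*s3.GetObjectOutput, error) {
	// Build the request but do not send it yet.
	req, resp := client.GetObjectRequest(&s3.GetObjectInput{
		Bucket: aws.String("example-bucket"), // hypothetical
		Key:    aws.String("example-key"),    // hypothetical
	})

	// The generated request exposes the underlying *http.Request,
	// so extra headers can be attached before Send.
	req.HTTPRequest.Header.Set("X-Example-Trace-Id", "abc123") // hypothetical header

	if err := req.Send(); err != nil {
		return nil, err
	}
	return resp, nil
}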
@@ -2865,19 +2827,18 @@ const opGetObjectAcl = "GetObjectAcl"
// GetObjectAclRequest generates a "aws/request.Request" representing the
// client's request for the GetObjectAcl operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See GetObjectAcl for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetObjectAcl method directly
-// instead.
+// See GetObjectAcl for more information on using the GetObjectAcl
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetObjectAclRequest method.
// req, resp := client.GetObjectAclRequest(params)
@@ -2945,19 +2906,18 @@ const opGetObjectTagging = "GetObjectTagging"
// GetObjectTaggingRequest generates a "aws/request.Request" representing the
// client's request for the GetObjectTagging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See GetObjectTagging for usage and error information.
+// See GetObjectTagging for more information on using the GetObjectTagging
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetObjectTagging method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetObjectTaggingRequest method.
// req, resp := client.GetObjectTaggingRequest(params)
@@ -3020,19 +2980,18 @@ const opGetObjectTorrent = "GetObjectTorrent"
// GetObjectTorrentRequest generates a "aws/request.Request" representing the
// client's request for the GetObjectTorrent operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See GetObjectTorrent for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetObjectTorrent method directly
-// instead.
+// See GetObjectTorrent for more information on using the GetObjectTorrent
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetObjectTorrentRequest method.
// req, resp := client.GetObjectTorrentRequest(params)
@@ -3095,19 +3054,18 @@ const opHeadBucket = "HeadBucket"
// HeadBucketRequest generates a "aws/request.Request" representing the
// client's request for the HeadBucket operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See HeadBucket for usage and error information.
+// See HeadBucket for more information on using the HeadBucket
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the HeadBucket method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the HeadBucketRequest method.
// req, resp := client.HeadBucketRequest(params)
@@ -3178,19 +3136,18 @@ const opHeadObject = "HeadObject"
// HeadObjectRequest generates a "aws/request.Request" representing the
// client's request for the HeadObject operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See HeadObject for usage and error information.
+// See HeadObject for more information on using the HeadObject
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the HeadObject method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the HeadObjectRequest method.
// req, resp := client.HeadObjectRequest(params)
@@ -3223,17 +3180,15 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou
// object itself. This operation is useful if you're only interested in an object's
// metadata. To use HEAD, you must have READ access to the object.
//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses
+// for more information on returned errors.
+//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Simple Storage Service's
// API operation HeadObject for usage and error information.
-//
-// Returned Error Codes:
-// * ErrCodeNoSuchKey "NoSuchKey"
-// The specified key does not exist.
-//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) {
req, out := c.HeadObjectRequest(input)
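The reworded HeadObject documentation drops the fixed NoSuchKey error-code listing in favour of the generic awserr.Error guidance. A hedged sketch of that inspection from caller code (treating a missing key as "NotFound" reflects commonly observed HEAD behaviour, not something this diff states; it assumes the imports above plus "github.com/aws/aws-sdk-go/aws/awserr"):

func objectExists(client *s3.S3, bucket, key string) (bool, error) {
	_, err := client.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(key),
	})
	if err == nil {
		return true, nil
	}
	if aerr, ok := err.(awserr.Error); ok {
		// HEAD responses carry no body, so a missing key typically surfaces
		// as a bare "NotFound" code rather than "NoSuchKey".
		if aerr.Code() == "NotFound" {
			return false, nil
		}
		return false, fmt.Errorf("HeadObject %s/%s: %s: %s", bucket, key, aerr.Code(), aerr.Message())
	}
	return false, err
}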
@@ -3260,19 +3215,18 @@ const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations"
// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the
// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See ListBucketAnalyticsConfigurations for usage and error information.
+// See ListBucketAnalyticsConfigurations for more information on using the ListBucketAnalyticsConfigurations
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the ListBucketAnalyticsConfigurations method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method.
// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params)
@@ -3335,19 +3289,18 @@ const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations"
// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the
// client's request for the ListBucketInventoryConfigurations operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See ListBucketInventoryConfigurations for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the ListBucketInventoryConfigurations method directly
-// instead.
+// See ListBucketInventoryConfigurations for more information on using the ListBucketInventoryConfigurations
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the ListBucketInventoryConfigurationsRequest method.
// req, resp := client.ListBucketInventoryConfigurationsRequest(params)
@@ -3410,19 +3363,18 @@ const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations"
// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the
// client's request for the ListBucketMetricsConfigurations operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See ListBucketMetricsConfigurations for usage and error information.
+// See ListBucketMetricsConfigurations for more information on using the ListBucketMetricsConfigurations
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the ListBucketMetricsConfigurations method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the ListBucketMetricsConfigurationsRequest method.
// req, resp := client.ListBucketMetricsConfigurationsRequest(params)
@@ -3485,19 +3437,18 @@ const opListBuckets = "ListBuckets"
// ListBucketsRequest generates a "aws/request.Request" representing the
// client's request for the ListBuckets operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See ListBuckets for usage and error information.
+// See ListBuckets for more information on using the ListBuckets
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the ListBuckets method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the ListBucketsRequest method.
// req, resp := client.ListBucketsRequest(params)
@@ -3560,19 +3511,18 @@ const opListMultipartUploads = "ListMultipartUploads"
// ListMultipartUploadsRequest generates a "aws/request.Request" representing the
// client's request for the ListMultipartUploads operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See ListMultipartUploads for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the ListMultipartUploads method directly
-// instead.
+// See ListMultipartUploads for more information on using the ListMultipartUploads
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the ListMultipartUploadsRequest method.
// req, resp := client.ListMultipartUploadsRequest(params)
@@ -3691,19 +3641,18 @@ const opListObjectVersions = "ListObjectVersions"
// ListObjectVersionsRequest generates a "aws/request.Request" representing the
// client's request for the ListObjectVersions operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See ListObjectVersions for usage and error information.
+// See ListObjectVersions for more information on using the ListObjectVersions
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the ListObjectVersions method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the ListObjectVersionsRequest method.
// req, resp := client.ListObjectVersionsRequest(params)
@@ -3822,19 +3771,18 @@ const opListObjects = "ListObjects"
// ListObjectsRequest generates a "aws/request.Request" representing the
// client's request for the ListObjects operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See ListObjects for usage and error information.
+// See ListObjects for more information on using the ListObjects
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the ListObjects method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the ListObjectsRequest method.
// req, resp := client.ListObjectsRequest(params)
@@ -3960,19 +3908,18 @@ const opListObjectsV2 = "ListObjectsV2"
// ListObjectsV2Request generates a "aws/request.Request" representing the
// client's request for the ListObjectsV2 operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See ListObjectsV2 for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the ListObjectsV2 method directly
-// instead.
+// See ListObjectsV2 for more information on using the ListObjectsV2
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the ListObjectsV2Request method.
// req, resp := client.ListObjectsV2Request(params)
@@ -4099,19 +4046,18 @@ const opListParts = "ListParts"
// ListPartsRequest generates a "aws/request.Request" representing the
// client's request for the ListParts operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See ListParts for usage and error information.
+// See ListParts for more information on using the ListParts
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the ListParts method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the ListPartsRequest method.
// req, resp := client.ListPartsRequest(params)
@@ -4230,19 +4176,18 @@ const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"
// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketAccelerateConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See PutBucketAccelerateConfiguration for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketAccelerateConfiguration method directly
-// instead.
+// See PutBucketAccelerateConfiguration for more information on using the PutBucketAccelerateConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketAccelerateConfigurationRequest method.
// req, resp := client.PutBucketAccelerateConfigurationRequest(params)
@@ -4307,19 +4252,18 @@ const opPutBucketAcl = "PutBucketAcl"
// PutBucketAclRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketAcl operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See PutBucketAcl for usage and error information.
+// See PutBucketAcl for more information on using the PutBucketAcl
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketAcl method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketAclRequest method.
// req, resp := client.PutBucketAclRequest(params)
@@ -4384,19 +4328,18 @@ const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration"
// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See PutBucketAnalyticsConfiguration for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketAnalyticsConfiguration method directly
-// instead.
+// See PutBucketAnalyticsConfiguration for more information on using the PutBucketAnalyticsConfiguration
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method.
// req, resp := client.PutBucketAnalyticsConfigurationRequest(params)
@@ -4462,19 +4405,18 @@ const opPutBucketCors = "PutBucketCors"
// PutBucketCorsRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketCors operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See PutBucketCors for usage and error information.
+// See PutBucketCors for more information on using the PutBucketCors
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketCors method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketCorsRequest method.
// req, resp := client.PutBucketCorsRequest(params)
@@ -4539,19 +4481,18 @@ const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"
// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketInventoryConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See PutBucketInventoryConfiguration for usage and error information.
+// See PutBucketInventoryConfiguration for more information on using the PutBucketInventoryConfiguration
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketInventoryConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketInventoryConfigurationRequest method.
// req, resp := client.PutBucketInventoryConfigurationRequest(params)
@@ -4617,19 +4558,18 @@ const opPutBucketLifecycle = "PutBucketLifecycle"
// PutBucketLifecycleRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketLifecycle operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See PutBucketLifecycle for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketLifecycle method directly
-// instead.
+// See PutBucketLifecycle for more information on using the PutBucketLifecycle
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketLifecycleRequest method.
// req, resp := client.PutBucketLifecycleRequest(params)
@@ -4697,19 +4637,18 @@ const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketLifecycleConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See PutBucketLifecycleConfiguration for usage and error information.
+// See PutBucketLifecycleConfiguration for more information on using the PutBucketLifecycleConfiguration
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketLifecycleConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketLifecycleConfigurationRequest method.
// req, resp := client.PutBucketLifecycleConfigurationRequest(params)
@@ -4775,19 +4714,18 @@ const opPutBucketLogging = "PutBucketLogging"
// PutBucketLoggingRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketLogging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See PutBucketLogging for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketLogging method directly
-// instead.
+// See PutBucketLogging for more information on using the PutBucketLogging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketLoggingRequest method.
// req, resp := client.PutBucketLoggingRequest(params)
@@ -4854,19 +4792,18 @@ const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration"
// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketMetricsConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See PutBucketMetricsConfiguration for usage and error information.
+// See PutBucketMetricsConfiguration for more information on using the PutBucketMetricsConfiguration
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketMetricsConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketMetricsConfigurationRequest method.
// req, resp := client.PutBucketMetricsConfigurationRequest(params)
@@ -4932,19 +4869,18 @@ const opPutBucketNotification = "PutBucketNotification"
// PutBucketNotificationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketNotification operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See PutBucketNotification for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketNotification method directly
-// instead.
+// See PutBucketNotification for more information on using the PutBucketNotification
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketNotificationRequest method.
// req, resp := client.PutBucketNotificationRequest(params)
@@ -5012,19 +4948,18 @@ const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration
// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketNotificationConfiguration operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See PutBucketNotificationConfiguration for usage and error information.
+// See PutBucketNotificationConfiguration for more information on using the PutBucketNotificationConfiguration
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketNotificationConfiguration method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketNotificationConfigurationRequest method.
// req, resp := client.PutBucketNotificationConfigurationRequest(params)
@@ -5089,19 +5024,18 @@ const opPutBucketPolicy = "PutBucketPolicy"
// PutBucketPolicyRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketPolicy operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See PutBucketPolicy for usage and error information.
+// See PutBucketPolicy for more information on using the PutBucketPolicy
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketPolicy method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketPolicyRequest method.
// req, resp := client.PutBucketPolicyRequest(params)
@@ -5167,19 +5101,18 @@ const opPutBucketReplication = "PutBucketReplication"
// PutBucketReplicationRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketReplication operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See PutBucketReplication for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketReplication method directly
-// instead.
+// See PutBucketReplication for more information on using the PutBucketReplication
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketReplicationRequest method.
// req, resp := client.PutBucketReplicationRequest(params)
@@ -5245,19 +5178,18 @@ const opPutBucketRequestPayment = "PutBucketRequestPayment"
// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketRequestPayment operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See PutBucketRequestPayment for usage and error information.
+// See PutBucketRequestPayment for more information on using the PutBucketRequestPayment
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketRequestPayment method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketRequestPaymentRequest method.
// req, resp := client.PutBucketRequestPaymentRequest(params)
@@ -5326,19 +5258,18 @@ const opPutBucketTagging = "PutBucketTagging"
// PutBucketTaggingRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketTagging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See PutBucketTagging for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketTagging method directly
-// instead.
+// See PutBucketTagging for more information on using the PutBucketTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketTaggingRequest method.
// req, resp := client.PutBucketTaggingRequest(params)
@@ -5403,19 +5334,18 @@ const opPutBucketVersioning = "PutBucketVersioning"
// PutBucketVersioningRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketVersioning operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See PutBucketVersioning for usage and error information.
+// See PutBucketVersioning for more information on using the PutBucketVersioning
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketVersioning method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketVersioningRequest method.
// req, resp := client.PutBucketVersioningRequest(params)
@@ -5481,19 +5411,18 @@ const opPutBucketWebsite = "PutBucketWebsite"
// PutBucketWebsiteRequest generates a "aws/request.Request" representing the
// client's request for the PutBucketWebsite operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See PutBucketWebsite for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutBucketWebsite method directly
-// instead.
+// See PutBucketWebsite for more information on using the PutBucketWebsite
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutBucketWebsiteRequest method.
// req, resp := client.PutBucketWebsiteRequest(params)
@@ -5558,19 +5487,18 @@ const opPutObject = "PutObject"
// PutObjectRequest generates a "aws/request.Request" representing the
// client's request for the PutObject operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See PutObject for usage and error information.
+// See PutObject for more information on using the PutObject
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutObject method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutObjectRequest method.
// req, resp := client.PutObjectRequest(params)
@@ -5633,19 +5561,18 @@ const opPutObjectAcl = "PutObjectAcl"
// PutObjectAclRequest generates a "aws/request.Request" representing the
// client's request for the PutObjectAcl operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See PutObjectAcl for usage and error information.
+// See PutObjectAcl for more information on using the PutObjectAcl
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutObjectAcl method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutObjectAclRequest method.
// req, resp := client.PutObjectAclRequest(params)
@@ -5714,19 +5641,18 @@ const opPutObjectTagging = "PutObjectTagging"
// PutObjectTaggingRequest generates a "aws/request.Request" representing the
// client's request for the PutObjectTagging operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See PutObjectTagging for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the PutObjectTagging method directly
-// instead.
+// See PutObjectTagging for more information on using the PutObjectTagging
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the PutObjectTaggingRequest method.
// req, resp := client.PutObjectTaggingRequest(params)
@@ -5789,19 +5715,18 @@ const opRestoreObject = "RestoreObject"
// RestoreObjectRequest generates a "aws/request.Request" representing the
// client's request for the RestoreObject operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See RestoreObject for usage and error information.
+// See RestoreObject for more information on using the RestoreObject
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the RestoreObject method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the RestoreObjectRequest method.
// req, resp := client.RestoreObjectRequest(params)
@@ -5869,19 +5794,18 @@ const opUploadPart = "UploadPart"
// UploadPartRequest generates a "aws/request.Request" representing the
// client's request for the UploadPart operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See UploadPart for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the UploadPart method directly
-// instead.
+// See UploadPart for more information on using the UploadPart
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the UploadPartRequest method.
// req, resp := client.UploadPartRequest(params)
@@ -5950,19 +5874,18 @@ const opUploadPartCopy = "UploadPartCopy"
// UploadPartCopyRequest generates a "aws/request.Request" representing the
// client's request for the UploadPartCopy operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See UploadPartCopy for usage and error information.
+// See UploadPartCopy for more information on using the UploadPartCopy
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the UploadPartCopy method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the UploadPartCopyRequest method.
// req, resp := client.UploadPartCopyRequest(params)
@@ -6106,6 +6029,13 @@ func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInp
return s
}
+func (s *AbortMultipartUploadInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
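The unexported getBucket helpers added throughout this file follow a nil-safe accessor pattern: the named return value v defaults to the string zero value, so a nil *string field yields "" instead of a panic on dereference. A standalone sketch of the same idea, using hypothetical names:

    package main

    import "fmt"

    type input struct {
        Bucket *string // optional field, may be nil
    }

    // getBucket returns the bucket name, or "" when the field is unset.
    func (s *input) getBucket() (v string) {
        if s.Bucket == nil {
            return v // named return: zero value ""
        }
        return *s.Bucket
    }

    func main() {
        var in input
        fmt.Printf("%q\n", in.getBucket()) // "" — no nil-pointer panic
    }
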
// SetKey sets the Key field's value.
func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput {
s.Key = &v
@@ -6515,6 +6445,13 @@ func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDes
return s
}
+func (s *AnalyticsS3BucketDestination) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetBucketAccountId sets the BucketAccountId field's value.
func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination {
s.BucketAccountId = &v
@@ -6873,7 +6810,7 @@ type CompleteMultipartUploadInput struct {
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
- MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"`
+ MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
// Confirms that the requester knows that she or he will be charged for the
// request. Bucket owners need not specify this parameter in their requests.
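The new xmlURI struct tags on XML payload members (here and on the other input structs below) are read by the SDK's internal xmlutil marshaler so that the serialized payload element carries the S3 document namespace. As an illustration only, the equivalent output built with the standard library would look roughly like this; the type and field names are hypothetical, not the SDK's:

    package main

    import (
        "encoding/xml"
        "fmt"
    )

    type part struct {
        ETag       string `xml:"ETag"`
        PartNumber int    `xml:"PartNumber"`
    }

    type completeMultipartUpload struct {
        XMLName xml.Name `xml:"http://s3.amazonaws.com/doc/2006-03-01/ CompleteMultipartUpload"`
        Parts   []part   `xml:"Part"`
    }

    func main() {
        out, _ := xml.Marshal(completeMultipartUpload{Parts: []part{{ETag: "etag-1", PartNumber: 1}}})
        fmt.Println(string(out))
        // <CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/">...</CompleteMultipartUpload>
    }
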
@@ -6923,6 +6860,13 @@ func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUpl
return s
}
+func (s *CompleteMultipartUploadInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetKey sets the Key field's value.
func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput {
s.Key = &v
@@ -6996,6 +6940,13 @@ func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUp
return s
}
+func (s *CompleteMultipartUploadOutput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetETag sets the ETag field's value.
func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput {
s.ETag = &v
@@ -7321,6 +7272,13 @@ func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput {
return s
}
+func (s *CopyObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetCacheControl sets the CacheControl field's value.
func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput {
s.CacheControl = &v
@@ -7393,6 +7351,13 @@ func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput
return s
}
+func (s *CopyObjectInput) getCopySourceSSECustomerKey() (v string) {
+ if s.CopySourceSSECustomerKey == nil {
+ return v
+ }
+ return *s.CopySourceSSECustomerKey
+}
+
// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput {
s.CopySourceSSECustomerKeyMD5 = &v
@@ -7465,6 +7430,13 @@ func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput {
return s
}
+func (s *CopyObjectInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput {
s.SSECustomerKeyMD5 = &v
@@ -7707,7 +7679,7 @@ type CreateBucketInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"`
+ CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
// Allows grantee the read, write, read ACP, and write ACP permissions on the
// bucket.
@@ -7761,6 +7733,13 @@ func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput {
return s
}
+func (s *CreateBucketInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value.
func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput {
s.CreateBucketConfiguration = v
@@ -7902,6 +7881,9 @@ type CreateMultipartUploadInput struct {
// The type of storage to use for the object. Defaults to 'STANDARD'.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+	// The tag-set for the object. The tag-set must be encoded as URL query parameters.
+ Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"`
+
// If the bucket is configured as a website, redirects requests for this object
// to another object in the same bucket or to an external URL. Amazon S3 stores
// the value of this header in the object metadata.
@@ -7949,6 +7931,13 @@ func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadI
return s
}
+func (s *CreateMultipartUploadInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetCacheControl sets the CacheControl field's value.
func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput {
s.CacheControl = &v
@@ -8039,6 +8028,13 @@ func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipar
return s
}
+func (s *CreateMultipartUploadInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput {
s.SSECustomerKeyMD5 = &v
@@ -8063,6 +8059,12 @@ func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartU
return s
}
+// SetTagging sets the Tagging field's value.
+func (s *CreateMultipartUploadInput) SetTagging(v string) *CreateMultipartUploadInput {
+ s.Tagging = &v
+ return s
+}
+
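Since the new Tagging field is sent as a header whose value is URL-query encoded, net/url can build it. A minimal sketch with hypothetical bucket, key, and tag names, assuming the aws, s3, and net/url imports:

    tags := url.Values{}
    tags.Set("project", "blue")
    tags.Set("team", "storage")

    input := &s3.CreateMultipartUploadInput{
        Bucket:  aws.String("example-bucket"),   // hypothetical
        Key:     aws.String("large-object.bin"), // hypothetical
        Tagging: aws.String(tags.Encode()),      // "project=blue&team=storage"
    }
    _ = input
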
// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value.
func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput {
s.WebsiteRedirectLocation = &v
@@ -8140,6 +8142,13 @@ func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUpload
return s
}
+func (s *CreateMultipartUploadOutput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetKey sets the Key field's value.
func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput {
s.Key = &v
@@ -8286,6 +8295,13 @@ func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBuc
return s
}
+func (s *DeleteBucketAnalyticsConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetId sets the Id field's value.
func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput {
s.Id = &v
@@ -8344,6 +8360,13 @@ func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput {
return s
}
+func (s *DeleteBucketCorsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput
type DeleteBucketCorsOutput struct {
_ struct{} `type:"structure"`
@@ -8396,6 +8419,13 @@ func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput {
return s
}
+func (s *DeleteBucketInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest
type DeleteBucketInventoryConfigurationInput struct {
_ struct{} `type:"structure"`
@@ -8443,6 +8473,13 @@ func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBuc
return s
}
+func (s *DeleteBucketInventoryConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetId sets the Id field's value.
func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput {
s.Id = &v
@@ -8501,6 +8538,13 @@ func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleI
return s
}
+func (s *DeleteBucketLifecycleInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput
type DeleteBucketLifecycleOutput struct {
_ struct{} `type:"structure"`
@@ -8563,6 +8607,13 @@ func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucke
return s
}
+func (s *DeleteBucketMetricsConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetId sets the Id field's value.
func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput {
s.Id = &v
@@ -8636,6 +8687,13 @@ func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput {
return s
}
+func (s *DeleteBucketPolicyInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput
type DeleteBucketPolicyOutput struct {
_ struct{} `type:"structure"`
@@ -8688,6 +8746,13 @@ func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicat
return s
}
+func (s *DeleteBucketReplicationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput
type DeleteBucketReplicationOutput struct {
_ struct{} `type:"structure"`
@@ -8740,6 +8805,13 @@ func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput
return s
}
+func (s *DeleteBucketTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput
type DeleteBucketTaggingOutput struct {
_ struct{} `type:"structure"`
@@ -8792,6 +8864,13 @@ func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput
return s
}
+func (s *DeleteBucketWebsiteInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput
type DeleteBucketWebsiteOutput struct {
_ struct{} `type:"structure"`
@@ -8926,6 +9005,13 @@ func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput {
return s
}
+func (s *DeleteObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetKey sets the Key field's value.
func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput {
s.Key = &v
@@ -9044,6 +9130,13 @@ func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput
return s
}
+func (s *DeleteObjectTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetKey sets the Key field's value.
func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput {
s.Key = &v
@@ -9088,7 +9181,7 @@ type DeleteObjectsInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// Delete is a required field
- Delete *Delete `locationName:"Delete" type:"structure" required:"true"`
+ Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
// The concatenation of the authentication device's serial number, a space,
// and the value that is displayed on your authentication device.
@@ -9138,6 +9231,13 @@ func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput {
return s
}
+func (s *DeleteObjectsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetDelete sets the Delete field's value.
func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput {
s.Delete = v
@@ -9287,6 +9387,13 @@ func (s *Destination) SetBucket(v string) *Destination {
return s
}
+func (s *Destination) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetStorageClass sets the StorageClass field's value.
func (s *Destination) SetStorageClass(v string) *Destination {
s.StorageClass = &v
@@ -9457,6 +9564,13 @@ func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAc
return s
}
+func (s *GetBucketAccelerateConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput
type GetBucketAccelerateConfigurationOutput struct {
_ struct{} `type:"structure"`
@@ -9518,6 +9632,13 @@ func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput {
return s
}
+func (s *GetBucketAclInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput
type GetBucketAclOutput struct {
_ struct{} `type:"structure"`
@@ -9597,6 +9718,13 @@ func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAna
return s
}
+func (s *GetBucketAnalyticsConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetId sets the Id field's value.
func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput {
s.Id = &v
@@ -9664,6 +9792,13 @@ func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput {
return s
}
+func (s *GetBucketCorsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput
type GetBucketCorsOutput struct {
_ struct{} `type:"structure"`
@@ -9734,6 +9869,13 @@ func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInv
return s
}
+func (s *GetBucketInventoryConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetId sets the Id field's value.
func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput {
s.Id = &v
@@ -9801,6 +9943,13 @@ func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLif
return s
}
+func (s *GetBucketLifecycleConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput
type GetBucketLifecycleConfigurationOutput struct {
_ struct{} `type:"structure"`
@@ -9861,6 +10010,13 @@ func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput {
return s
}
+func (s *GetBucketLifecycleInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput
type GetBucketLifecycleOutput struct {
_ struct{} `type:"structure"`
@@ -9921,6 +10077,13 @@ func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput {
return s
}
+func (s *GetBucketLocationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput
type GetBucketLocationOutput struct {
_ struct{} `type:"structure"`
@@ -9981,6 +10144,13 @@ func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput {
return s
}
+func (s *GetBucketLoggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput
type GetBucketLoggingOutput struct {
_ struct{} `type:"structure"`
@@ -10051,6 +10221,13 @@ func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetri
return s
}
+func (s *GetBucketMetricsConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetId sets the Id field's value.
func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput {
s.Id = &v
@@ -10120,6 +10297,13 @@ func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBuck
return s
}
+func (s *GetBucketNotificationConfigurationRequest) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest
type GetBucketPolicyInput struct {
_ struct{} `type:"structure"`
@@ -10157,6 +10341,13 @@ func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput {
return s
}
+func (s *GetBucketPolicyInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput
type GetBucketPolicyOutput struct {
_ struct{} `type:"structure" payload:"Policy"`
@@ -10218,6 +10409,13 @@ func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInp
return s
}
+func (s *GetBucketReplicationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput
type GetBucketReplicationOutput struct {
_ struct{} `type:"structure" payload:"ReplicationConfiguration"`
@@ -10280,6 +10478,13 @@ func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaym
return s
}
+func (s *GetBucketRequestPaymentInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput
type GetBucketRequestPaymentOutput struct {
_ struct{} `type:"structure"`
@@ -10341,6 +10546,13 @@ func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput {
return s
}
+func (s *GetBucketTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput
type GetBucketTaggingOutput struct {
_ struct{} `type:"structure"`
@@ -10402,6 +10614,13 @@ func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput
return s
}
+func (s *GetBucketVersioningInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput
type GetBucketVersioningOutput struct {
_ struct{} `type:"structure"`
@@ -10474,6 +10693,13 @@ func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput {
return s
}
+func (s *GetBucketWebsiteInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput
type GetBucketWebsiteOutput struct {
_ struct{} `type:"structure"`
@@ -10576,6 +10802,13 @@ func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput {
return s
}
+func (s *GetObjectAclInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetKey sets the Key field's value.
func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput {
s.Key = &v
@@ -10749,6 +10982,13 @@ func (s *GetObjectInput) SetBucket(v string) *GetObjectInput {
return s
}
+func (s *GetObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetIfMatch sets the IfMatch field's value.
func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput {
s.IfMatch = &v
@@ -10845,6 +11085,13 @@ func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput {
return s
}
+func (s *GetObjectInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput {
s.SSECustomerKeyMD5 = &v
@@ -11189,6 +11436,13 @@ func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput {
return s
}
+func (s *GetObjectTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetKey sets the Key field's value.
func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput {
s.Key = &v
@@ -11285,6 +11539,13 @@ func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput {
return s
}
+func (s *GetObjectTorrentInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetKey sets the Key field's value.
func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput {
s.Key = &v
@@ -11373,7 +11634,7 @@ func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters {
type Grant struct {
_ struct{} `type:"structure"`
- Grantee *Grantee `type:"structure"`
+ Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
// Specifies the permission given to the grantee.
Permission *string `type:"string" enum:"Permission"`
@@ -11528,6 +11789,13 @@ func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput {
return s
}
+func (s *HeadBucketInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput
type HeadBucketOutput struct {
_ struct{} `type:"structure"`
@@ -11639,6 +11907,13 @@ func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput {
return s
}
+func (s *HeadObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetIfMatch sets the IfMatch field's value.
func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput {
s.IfMatch = &v
@@ -11699,6 +11974,13 @@ func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput {
return s
}
+func (s *HeadObjectInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput {
s.SSECustomerKeyMD5 = &v
@@ -12317,6 +12599,13 @@ func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDes
return s
}
+func (s *InventoryS3BucketDestination) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetFormat sets the Format field's value.
func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination {
s.Format = &v
@@ -12847,6 +13136,13 @@ func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucket
return s
}
+func (s *ListBucketAnalyticsConfigurationsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetContinuationToken sets the ContinuationToken field's value.
func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput {
s.ContinuationToken = &v
@@ -12953,6 +13249,13 @@ func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucket
return s
}
+func (s *ListBucketInventoryConfigurationsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetContinuationToken sets the ContinuationToken field's value.
func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput {
s.ContinuationToken = &v
@@ -13059,6 +13362,13 @@ func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMe
return s
}
+func (s *ListBucketMetricsConfigurationsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetContinuationToken sets the ContinuationToken field's value.
func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput {
s.ContinuationToken = &v
@@ -13234,6 +13544,13 @@ func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInp
return s
}
+func (s *ListMultipartUploadsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetDelimiter sets the Delimiter field's value.
func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput {
s.Delimiter = &v
@@ -13331,6 +13648,13 @@ func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOu
return s
}
+func (s *ListMultipartUploadsOutput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetCommonPrefixes sets the CommonPrefixes field's value.
func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput {
s.CommonPrefixes = v
@@ -13458,6 +13782,13 @@ func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput {
return s
}
+func (s *ListObjectVersionsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetDelimiter sets the Delimiter field's value.
func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput {
s.Delimiter = &v
@@ -13685,6 +14016,13 @@ func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput {
return s
}
+func (s *ListObjectsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetDelimiter sets the Delimiter field's value.
func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput {
s.Delimiter = &v
@@ -13897,6 +14235,13 @@ func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input {
return s
}
+func (s *ListObjectsV2Input) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetContinuationToken sets the ContinuationToken field's value.
func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input {
s.ContinuationToken = &v
@@ -14146,6 +14491,13 @@ func (s *ListPartsInput) SetBucket(v string) *ListPartsInput {
return s
}
+func (s *ListPartsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetKey sets the Key field's value.
func (s *ListPartsInput) SetKey(v string) *ListPartsInput {
s.Key = &v
@@ -14253,6 +14605,13 @@ func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput {
return s
}
+func (s *ListPartsOutput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetInitiator sets the Initiator field's value.
func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput {
s.Initiator = v
@@ -15136,7 +15495,7 @@ type PutBucketAccelerateConfigurationInput struct {
// Specifies the Accelerate Configuration you want to set for the bucket.
//
// AccelerateConfiguration is a required field
- AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"`
+ AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
// Name of the bucket for which the accelerate configuration is set.
//
@@ -15182,6 +15541,13 @@ func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAc
return s
}
+func (s *PutBucketAccelerateConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput
type PutBucketAccelerateConfigurationOutput struct {
_ struct{} `type:"structure"`
@@ -15204,7 +15570,7 @@ type PutBucketAclInput struct {
// The canned ACL to apply to the bucket.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`
- AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -15272,6 +15638,13 @@ func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput {
return s
}
+func (s *PutBucketAclInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetGrantFullControl sets the GrantFullControl field's value.
func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput {
s.GrantFullControl = &v
@@ -15324,7 +15697,7 @@ type PutBucketAnalyticsConfigurationInput struct {
// The configuration and any analyses for the analytics filter.
//
// AnalyticsConfiguration is a required field
- AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true"`
+ AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
// The name of the bucket to which an analytics configuration is stored.
//
@@ -15383,6 +15756,13 @@ func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAna
return s
}
+func (s *PutBucketAnalyticsConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetId sets the Id field's value.
func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput {
s.Id = &v
@@ -15412,7 +15792,7 @@ type PutBucketCorsInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// CORSConfiguration is a required field
- CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"`
+ CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
// String returns the string representation
@@ -15452,6 +15832,13 @@ func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput {
return s
}
+func (s *PutBucketCorsInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetCORSConfiguration sets the CORSConfiguration field's value.
func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput {
s.CORSConfiguration = v
@@ -15490,7 +15877,7 @@ type PutBucketInventoryConfigurationInput struct {
// Specifies the inventory configuration.
//
// InventoryConfiguration is a required field
- InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true"`
+ InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
// String returns the string representation
@@ -15533,6 +15920,13 @@ func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInv
return s
}
+func (s *PutBucketInventoryConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetId sets the Id field's value.
func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput {
s.Id = &v
@@ -15567,7 +15961,7 @@ type PutBucketLifecycleConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"`
+ LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
// String returns the string representation
@@ -15604,6 +15998,13 @@ func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLif
return s
}
+func (s *PutBucketLifecycleConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput {
s.LifecycleConfiguration = v
@@ -15632,7 +16033,7 @@ type PutBucketLifecycleInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
- LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"`
+ LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
// String returns the string representation
@@ -15669,6 +16070,13 @@ func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput {
return s
}
+func (s *PutBucketLifecycleInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetLifecycleConfiguration sets the LifecycleConfiguration field's value.
func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput {
s.LifecycleConfiguration = v
@@ -15698,7 +16106,7 @@ type PutBucketLoggingInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// BucketLoggingStatus is a required field
- BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"`
+ BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
// String returns the string representation
@@ -15738,6 +16146,13 @@ func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput {
return s
}
+func (s *PutBucketLoggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetBucketLoggingStatus sets the BucketLoggingStatus field's value.
func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput {
s.BucketLoggingStatus = v
@@ -15776,7 +16191,7 @@ type PutBucketMetricsConfigurationInput struct {
// Specifies the metrics configuration.
//
// MetricsConfiguration is a required field
- MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true"`
+ MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
// String returns the string representation
@@ -15819,6 +16234,13 @@ func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetri
return s
}
+func (s *PutBucketMetricsConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetId sets the Id field's value.
func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput {
s.Id = &v
@@ -15857,7 +16279,7 @@ type PutBucketNotificationConfigurationInput struct {
// this element is empty, notifications are turned off on the bucket.
//
// NotificationConfiguration is a required field
- NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"`
+ NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
// String returns the string representation
@@ -15897,6 +16319,13 @@ func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucket
return s
}
+func (s *PutBucketNotificationConfigurationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetNotificationConfiguration sets the NotificationConfiguration field's value.
func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput {
s.NotificationConfiguration = v
@@ -15926,7 +16355,7 @@ type PutBucketNotificationInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// NotificationConfiguration is a required field
- NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"`
+ NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
// String returns the string representation
@@ -15961,6 +16390,13 @@ func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationI
return s
}
+func (s *PutBucketNotificationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetNotificationConfiguration sets the NotificationConfiguration field's value.
func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput {
s.NotificationConfiguration = v
@@ -16027,6 +16463,13 @@ func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput {
return s
}
+func (s *PutBucketPolicyInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetPolicy sets the Policy field's value.
func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput {
s.Policy = &v
@@ -16059,7 +16502,7 @@ type PutBucketReplicationInput struct {
// replication configuration size can be up to 2 MB.
//
// ReplicationConfiguration is a required field
- ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"`
+ ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
// String returns the string representation
@@ -16099,6 +16542,13 @@ func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInp
return s
}
+func (s *PutBucketReplicationInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetReplicationConfiguration sets the ReplicationConfiguration field's value.
func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput {
s.ReplicationConfiguration = v
@@ -16128,7 +16578,7 @@ type PutBucketRequestPaymentInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// RequestPaymentConfiguration is a required field
- RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"`
+ RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
// String returns the string representation
@@ -16168,6 +16618,13 @@ func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaym
return s
}
+func (s *PutBucketRequestPaymentInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value.
func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput {
s.RequestPaymentConfiguration = v
@@ -16197,7 +16654,7 @@ type PutBucketTaggingInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// Tagging is a required field
- Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
+ Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
// String returns the string representation
@@ -16237,6 +16694,13 @@ func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput {
return s
}
+func (s *PutBucketTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetTagging sets the Tagging field's value.
func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput {
s.Tagging = v
@@ -16270,7 +16734,7 @@ type PutBucketVersioningInput struct {
MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`
// VersioningConfiguration is a required field
- VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"`
+ VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
// String returns the string representation
@@ -16305,6 +16769,13 @@ func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput
return s
}
+func (s *PutBucketVersioningInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetMFA sets the MFA field's value.
func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput {
s.MFA = &v
@@ -16340,7 +16811,7 @@ type PutBucketWebsiteInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
// WebsiteConfiguration is a required field
- WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"`
+ WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}
// String returns the string representation
@@ -16380,6 +16851,13 @@ func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput {
return s
}
+func (s *PutBucketWebsiteInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetWebsiteConfiguration sets the WebsiteConfiguration field's value.
func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput {
s.WebsiteConfiguration = v
@@ -16408,7 +16886,7 @@ type PutObjectAclInput struct {
// The canned ACL to apply to the object.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
- AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
+ AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -16494,6 +16972,13 @@ func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput {
return s
}
+func (s *PutObjectAclInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetGrantFullControl sets the GrantFullControl field's value.
func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput {
s.GrantFullControl = &v
@@ -16600,6 +17085,9 @@ type PutObjectInput struct {
// body cannot be determined automatically.
ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+	// The base64-encoded 128-bit MD5 digest of the object data.
+ ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
// A standard MIME type describing the format of the object data.
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
@@ -16716,6 +17204,13 @@ func (s *PutObjectInput) SetBucket(v string) *PutObjectInput {
return s
}
+func (s *PutObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetCacheControl sets the CacheControl field's value.
func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput {
s.CacheControl = &v
@@ -16746,6 +17241,12 @@ func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput {
return s
}
+// SetContentMD5 sets the ContentMD5 field's value.
+func (s *PutObjectInput) SetContentMD5(v string) *PutObjectInput {
+ s.ContentMD5 = &v
+ return s
+}
+
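Content-MD5 is the base64 encoding of the raw 16-byte MD5 digest, not of its hex form. A minimal sketch for the new PutObjectInput field, assuming the body is already in memory and the aws, s3, bytes, crypto/md5, and encoding/base64 imports; the bucket and key are hypothetical:

    body := []byte("hello world")
    sum := md5.Sum(body) // [16]byte
    input := &s3.PutObjectInput{
        Bucket:     aws.String("example-bucket"),
        Key:        aws.String("greeting.txt"),
        Body:       bytes.NewReader(body),
        ContentMD5: aws.String(base64.StdEncoding.EncodeToString(sum[:])),
    }
    _ = input

The same encoding applies to the ContentMD5 field added to UploadPartInput further down.
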
// SetContentType sets the ContentType field's value.
func (s *PutObjectInput) SetContentType(v string) *PutObjectInput {
s.ContentType = &v
@@ -16812,6 +17313,13 @@ func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput {
return s
}
+func (s *PutObjectInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput {
s.SSECustomerKeyMD5 = &v
@@ -16954,7 +17462,7 @@ type PutObjectTaggingInput struct {
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
// Tagging is a required field
- Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
+ Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
@@ -17002,6 +17510,13 @@ func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput {
return s
}
+func (s *PutObjectTaggingInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetKey sets the Key field's value.
func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput {
s.Key = &v
@@ -17488,7 +18003,7 @@ type RestoreObjectInput struct {
// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
- RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"`
+ RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
@@ -17533,6 +18048,13 @@ func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput {
return s
}
+func (s *RestoreObjectInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetKey sets the Key field's value.
func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput {
s.Key = &v
@@ -18008,7 +18530,7 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging {
type TargetGrant struct {
_ struct{} `type:"structure"`
- Grantee *Grantee `type:"structure"`
+ Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`
// Logging permissions assigned to the Grantee for the bucket.
Permission *string `type:"string" enum:"BucketLogsPermission"`
@@ -18348,6 +18870,13 @@ func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput {
return s
}
+func (s *UploadPartCopyInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetCopySource sets the CopySource field's value.
func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput {
s.CopySource = &v
@@ -18396,6 +18925,13 @@ func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartC
return s
}
+func (s *UploadPartCopyInput) getCopySourceSSECustomerKey() (v string) {
+ if s.CopySourceSSECustomerKey == nil {
+ return v
+ }
+ return *s.CopySourceSSECustomerKey
+}
+
// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value.
func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput {
s.CopySourceSSECustomerKeyMD5 = &v
@@ -18432,6 +18968,13 @@ func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput {
return s
}
+func (s *UploadPartCopyInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput {
s.SSECustomerKeyMD5 = &v
@@ -18545,6 +19088,9 @@ type UploadPartInput struct {
// body cannot be determined automatically.
ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+ // The base64-encoded 128-bit MD5 digest of the part data.
+ ContentMD5 *string `location:"header" locationName:"Content-MD5" type:"string"`
+
// Object key for which the multipart upload was initiated.
//
// Key is a required field
@@ -18631,12 +19177,25 @@ func (s *UploadPartInput) SetBucket(v string) *UploadPartInput {
return s
}
+func (s *UploadPartInput) getBucket() (v string) {
+ if s.Bucket == nil {
+ return v
+ }
+ return *s.Bucket
+}
+
// SetContentLength sets the ContentLength field's value.
func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput {
s.ContentLength = &v
return s
}
+// SetContentMD5 sets the ContentMD5 field's value.
+func (s *UploadPartInput) SetContentMD5(v string) *UploadPartInput {
+ s.ContentMD5 = &v
+ return s
+}
+
// SetKey sets the Key field's value.
func (s *UploadPartInput) SetKey(v string) *UploadPartInput {
s.Key = &v
@@ -18667,6 +19226,13 @@ func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput {
return s
}
+func (s *UploadPartInput) getSSECustomerKey() (v string) {
+ if s.SSECustomerKey == nil {
+ return v
+ }
+ return *s.SSECustomerKey
+}
+
// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value.
func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput {
s.SSECustomerKeyMD5 = &v
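For reference, a minimal sketch of how the new ContentMD5 field and its SetContentMD5 setter on UploadPartInput could be used. The bucket, key, and upload ID below are placeholders, not values taken from this change:

    package main

    import (
        "bytes"
        "crypto/md5"
        "encoding/base64"
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    // buildPartInput attaches a base64-encoded MD5 digest of the part body so
    // S3 can verify the part on upload.
    func buildPartInput(part []byte, uploadID string) *s3.UploadPartInput {
        sum := md5.Sum(part)
        in := &s3.UploadPartInput{
            Bucket:     aws.String("my-bucket"),
            Key:        aws.String("my-key"),
            UploadId:   aws.String(uploadID),
            PartNumber: aws.Int64(1),
            Body:       bytes.NewReader(part),
        }
        in.SetContentMD5(base64.StdEncoding.EncodeToString(sum[:]))
        return in
    }

    func main() {
        fmt.Println(buildPartInput([]byte("example part data"), "example-upload-id"))
    }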
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
index c3a2702..bc68a46 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go
@@ -12,6 +12,69 @@ import (
var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`)
+// NormalizeBucketLocation is a utility function which will update the
+// passed in value to always be a region ID. Generally this would be used
+// with GetBucketLocation API operation.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+func NormalizeBucketLocation(loc string) string {
+ switch loc {
+ case "":
+ loc = "us-east-1"
+ case "EU":
+ loc = "eu-west-1"
+ }
+
+ return loc
+}
+
+// NormalizeBucketLocationHandler is a request handler which will update the
+// GetBucketLocation's result LocationConstraint value to always be a region ID.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+//
+// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{
+// Bucket: aws.String(bucket),
+// })
+// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
+// err := req.Send()
+var NormalizeBucketLocationHandler = request.NamedHandler{
+ Name: "awssdk.s3.NormalizeBucketLocation",
+ Fn: func(req *request.Request) {
+ if req.Error != nil {
+ return
+ }
+
+ out := req.Data.(*GetBucketLocationOutput)
+ loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint))
+ out.LocationConstraint = aws.String(loc)
+ },
+}
+
+// WithNormalizeBucketLocation is a request option which will update the
+// GetBucketLocation's result LocationConstraint value to always be a region ID.
+//
+// Replaces empty string with "us-east-1", and "EU" with "eu-west-1".
+//
+// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html
+// for more information on the values that can be returned.
+//
+// result, err := svc.GetBucketLocationWithContext(ctx,
+// &s3.GetBucketLocationInput{
+// Bucket: aws.String(bucket),
+// },
+// s3.WithNormalizeBucketLocation,
+// )
+func WithNormalizeBucketLocation(r *request.Request) {
+ r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler)
+}
+
func buildGetBucketLocation(r *request.Request) {
if r.DataFilled() {
out := r.Data.(*GetBucketLocationOutput)
@@ -24,7 +87,7 @@ func buildGetBucketLocation(r *request.Request) {
match := reBucketLocation.FindSubmatch(b)
if len(match) > 1 {
loc := string(match[1])
- out.LocationConstraint = &loc
+ out.LocationConstraint = aws.String(loc)
}
}
}
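For reference, a self-contained sketch of the new WithNormalizeBucketLocation request option added above. The region, bucket name, and credentials are placeholders resolved from the environment, not part of this change:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := s3.New(sess)

        // The option rewrites "" and "EU" in the response to "us-east-1" and
        // "eu-west-1" before the result is returned.
        out, err := svc.GetBucketLocationWithContext(aws.BackgroundContext(),
            &s3.GetBucketLocationInput{Bucket: aws.String("my-bucket")},
            s3.WithNormalizeBucketLocation,
        )
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("bucket region:", aws.StringValue(out.LocationConstraint))
    }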
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
index 8463347..899d5e8 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go
@@ -44,3 +44,21 @@ func defaultInitRequestFn(r *request.Request) {
r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
}
}
+
+// bucketGetter is an accessor interface to grab the "Bucket" field from
+// an S3 type.
+type bucketGetter interface {
+ getBucket() string
+}
+
+// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey"
+// field from an S3 type.
+type sseCustomerKeyGetter interface {
+ getSSECustomerKey() string
+}
+
+// copySourceSSECustomerKeyGetter is an accessor interface to grab the
+// "CopySourceSSECustomerKey" field from an S3 type.
+type copySourceSSECustomerKeyGetter interface {
+ getCopySourceSSECustomerKey() string
+}
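These accessor interfaces are unexported, so the sketch below mirrors the pattern with a stand-in input type. It shows why the generated getBucket() methods exist: request handlers can read the field with a cheap type assertion instead of the reflection-based awsutil.ValuesAtPath lookup they replace.

    package main

    import "fmt"

    // bucketGetter mirrors the unexported interface above; generated S3 input
    // types satisfy it through their getBucket() accessor.
    type bucketGetter interface {
        getBucket() string
    }

    // fakeInput stands in for a generated input struct with a *string Bucket.
    type fakeInput struct{ bucket *string }

    func (f *fakeInput) getBucket() (v string) {
        if f.bucket == nil {
            return v
        }
        return *f.bucket
    }

    // bucketName is the assertion-based lookup pattern used by the handlers.
    func bucketName(params interface{}) (string, bool) {
        if g, ok := params.(bucketGetter); ok {
            b := g.getBucket()
            return b, len(b) > 0
        }
        return "", false
    }

    func main() {
        b := "my-bucket"
        fmt.Println(bucketName(&fakeInput{bucket: &b})) // my-bucket true
        fmt.Println(bucketName(struct{}{}))             // "" false
    }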
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
new file mode 100644
index 0000000..30068d1
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go
@@ -0,0 +1,26 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package s3 provides the client and types for making API
+// requests to Amazon Simple Storage Service.
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service.
+//
+// See s3 package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/
+//
+// Using the Client
+//
+// To contact Amazon Simple Storage Service with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the Amazon Simple Storage Service client S3 for more
+// information on creating a client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
+package s3
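A minimal sketch of the client creation described in the package docs above; the region and the ListBuckets call are illustrative only:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // The session resolves credentials and shared config; the region here
        // is only an example.
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
        svc := s3.New(sess)

        out, err := svc.ListBuckets(&s3.ListBucketsInput{})
        if err != nil {
            log.Fatal(err)
        }
        for _, b := range out.Buckets {
            fmt.Println(aws.StringValue(b.Name))
        }
    }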
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
new file mode 100644
index 0000000..b794a63
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go
@@ -0,0 +1,109 @@
+// Upload Managers
+//
+// The s3manager package's Uploader provides concurrent upload of content to S3
+// by taking advantage of S3's Multipart APIs. The Uploader also supports both
+// io.Reader for streaming uploads, and will also take advantage of io.ReadSeeker
+// for optimizations if the Body satisfies that type. Once the Uploader instance
+// is created you can call Upload concurrently from multiple goroutines safely.
+//
+// // The session the S3 Uploader will use
+// sess := session.Must(session.NewSession())
+//
+// // Create an uploader with the session and default options
+// uploader := s3manager.NewUploader(sess)
+//
+// f, err := os.Open(filename)
+// if err != nil {
+// return fmt.Errorf("failed to open file %q, %v", filename, err)
+// }
+//
+// // Upload the file to S3.
+// result, err := uploader.Upload(&s3manager.UploadInput{
+// Bucket: aws.String(myBucket),
+// Key: aws.String(myString),
+// Body: f,
+// })
+// if err != nil {
+// return fmt.Errorf("failed to upload file, %v", err)
+// }
+// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location))
+//
+// See the s3manager package's Uploader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader
+//
+// Download Manager
+//
+// The s3manager package's Downloader provides concurrent downloading of objects
+// from S3. The Downloader will write S3 Object content with an io.WriterAt.
+// Once the Downloader instance is created you can call Download concurrently from
+// multiple goroutines safely.
+//
+// // The session the S3 Downloader will use
+// sess := session.Must(session.NewSession())
+//
+// // Create a downloader with the session and default options
+// downloader := s3manager.NewDownloader(sess)
+//
+// // Create a file to write the S3 Object contents to.
+// f, err := os.Create(filename)
+// if err != nil {
+// return fmt.Errorf("failed to create file %q, %v", filename, err)
+// }
+//
+// // Write the contents of S3 Object to the file
+// n, err := downloader.Download(f, &s3.GetObjectInput{
+// Bucket: aws.String(myBucket),
+// Key: aws.String(myString),
+// })
+// if err != nil {
+// return fmt.Errorf("failed to upload file, %v", err)
+// }
+// fmt.Printf("file downloaded, %d bytes\n", n)
+//
+// See the s3manager package's Downloader type documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader
+//
+// Get Bucket Region
+//
+// GetBucketRegion will attempt to get the region for a bucket using a region
+// hint to determine which AWS partition to perform the query on. Use this utility
+// to determine the region a bucket is in.
+//
+// sess := session.Must(session.NewSession())
+//
+// bucket := "my-bucket"
+// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2")
+// if err != nil {
+// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" {
+// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region\n", bucket)
+// }
+// return err
+// }
+// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
+//
+// See the s3manager package's GetBucketRegion function documentation for more information
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion
+//
+// S3 Crypto Client
+//
+// The s3crypto package provides the tools to upload and download encrypted
+// content from S3. The Encryption and Decryption clients can be used concurrently
+// once the client is created.
+//
+// sess := session.Must(session.NewSession())
+//
+// // Create the decryption client.
+// svc := s3crypto.NewDecryptionClient(sess)
+//
+// // The object will be downloaded from S3 and decrypted locally. Metadata
+// // about the object's encryption will instruct the decryption client how to
+// // decrypt the content of the object. By default KMS is used for keys.
+// result, err := svc.GetObject(&s3.GetObjectInput {
+// Bucket: aws.String(myBucket),
+// Key: aws.String(myKey),
+// })
+//
+// See the s3crypto package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
+//
+package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
index ec3ffe4..a7fbc2d 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go
@@ -8,7 +8,6 @@ import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
)
@@ -113,15 +112,9 @@ func updateEndpointForAccelerate(r *request.Request) {
// Attempts to retrieve the bucket name from the request input parameters.
// If no bucket is found, or the field is empty "", false will be returned.
func bucketNameFromReqParams(params interface{}) (string, bool) {
- b, _ := awsutil.ValuesAtPath(params, "Bucket")
- if len(b) == 0 {
- return "", false
- }
-
- if bucket, ok := b[0].(*string); ok {
- if bucketStr := aws.StringValue(bucket); bucketStr != "" {
- return bucketStr, true
- }
+ if iface, ok := params.(bucketGetter); ok {
+ b := iface.getBucket()
+ return b, len(b) > 0
}
return "", false
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
index 3fb5b3b..614e477 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -11,10 +11,12 @@ import (
"github.com/aws/aws-sdk-go/private/protocol/restxml"
)
-// S3 is a client for Amazon S3.
-// The service client's operations are safe to be used concurrently.
-// It is not safe to mutate any of the client's properties though.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01
+// S3 provides the API operation methods for making requests to
+// Amazon Simple Storage Service. See this package's package overview docs
+// for details on the service.
+//
+// S3 methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
type S3 struct {
*client.Client
}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
index 268ea2f..8010c4f 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go
@@ -5,17 +5,27 @@ import (
"encoding/base64"
"github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
)
var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
func validateSSERequiresSSL(r *request.Request) {
- if r.HTTPRequest.URL.Scheme != "https" {
- p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey")
- if len(p) > 0 {
+ if r.HTTPRequest.URL.Scheme == "https" {
+ return
+ }
+
+ if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
+ if len(iface.getSSECustomerKey()) > 0 {
+ r.Error = errSSERequiresSSL
+ return
+ }
+ }
+
+ if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
+ if len(iface.getCopySourceSSECustomerKey()) > 0 {
r.Error = errSSERequiresSSL
+ return
}
}
}
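A hedged sketch of what this validation surfaces to callers, assuming the handler stays registered at the client level as before. The bucket, key, and key material are placeholders; the request is expected to fail locally with a "ConfigError" before anything is sent, because an SSE-C key is combined with a plain-HTTP endpoint:

    package main

    import (
        "fmt"
        "strings"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // DisableSSL forces an http:// endpoint, the case the handler guards.
        sess := session.Must(session.NewSession(&aws.Config{
            Region:     aws.String("us-east-1"),
            DisableSSL: aws.Bool(true),
        }))
        svc := s3.New(sess)

        _, err := svc.PutObject(&s3.PutObjectInput{
            Bucket:         aws.String("my-bucket"),
            Key:            aws.String("my-key"),
            Body:           strings.NewReader("hello"),
            SSECustomerKey: aws.String("32-byte-long-customer-key-000000"),
        })
        if aerr, ok := err.(awserr.Error); ok {
            fmt.Println(aerr.Code()) // expected: ConfigError
        }
    }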
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
index cccfa8c..2596c69 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -11,7 +11,7 @@ import (
// WaitUntilBucketExists uses the Amazon S3 API operation
// HeadBucket to wait for a condition to be met before returning.
-// If the condition is not meet within the max attempt window an error will
+// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
@@ -72,7 +72,7 @@ func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucket
// WaitUntilBucketNotExists uses the Amazon S3 API operation
// HeadBucket to wait for a condition to be met before returning.
-// If the condition is not meet within the max attempt window an error will
+// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
@@ -118,7 +118,7 @@ func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBuc
// WaitUntilObjectExists uses the Amazon S3 API operation
// HeadObject to wait for a condition to be met before returning.
-// If the condition is not meet within the max attempt window an error will
+// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
@@ -169,7 +169,7 @@ func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObject
// WaitUntilObjectNotExists uses the Amazon S3 API operation
// HeadObject to wait for a condition to be met before returning.
-// If the condition is not meet within the max attempt window an error will
+// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
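A short usage sketch of one of these waiters; the bucket name is a placeholder. The call polls HeadBucket until the bucket is visible or the attempt window described above is exhausted:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := s3.New(sess)

        if err := svc.WaitUntilBucketExists(&s3.HeadBucketInput{
            Bucket: aws.String("my-bucket"),
        }); err != nil {
            log.Fatalf("bucket never became available: %v", err)
        }
        fmt.Println("bucket exists")
    }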
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index 19dd0bf..3b8be43 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -1,6 +1,5 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
-// Package sts provides a client for AWS Security Token Service.
package sts
import (
@@ -15,19 +14,18 @@ const opAssumeRole = "AssumeRole"
// AssumeRoleRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRole operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See AssumeRole for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the AssumeRole method directly
-// instead.
+// See AssumeRole for more information on using the AssumeRole
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the AssumeRoleRequest method.
// req, resp := client.AssumeRoleRequest(params)
@@ -196,19 +194,18 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRoleWithSAML operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See AssumeRoleWithSAML for usage and error information.
+// See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the AssumeRoleWithSAML method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the AssumeRoleWithSAMLRequest method.
// req, resp := client.AssumeRoleWithSAMLRequest(params)
@@ -370,19 +367,18 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRoleWithWebIdentity operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See AssumeRoleWithWebIdentity for usage and error information.
+// See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the AssumeRoleWithWebIdentity method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
// req, resp := client.AssumeRoleWithWebIdentityRequest(params)
@@ -573,19 +569,18 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
// client's request for the DecodeAuthorizationMessage operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See DecodeAuthorizationMessage for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the DecodeAuthorizationMessage method directly
-// instead.
+// See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the DecodeAuthorizationMessageRequest method.
// req, resp := client.DecodeAuthorizationMessageRequest(params)
@@ -686,19 +681,18 @@ const opGetCallerIdentity = "GetCallerIdentity"
// GetCallerIdentityRequest generates a "aws/request.Request" representing the
// client's request for the GetCallerIdentity operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See GetCallerIdentity for usage and error information.
+// See GetCallerIdentity for more information on using the GetCallerIdentity
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetCallerIdentity method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetCallerIdentityRequest method.
// req, resp := client.GetCallerIdentityRequest(params)
@@ -762,19 +756,18 @@ const opGetFederationToken = "GetFederationToken"
// GetFederationTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetFederationToken operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
//
-// See GetFederationToken for usage and error information.
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetFederationToken method directly
-// instead.
+// See GetFederationToken for more information on using the GetFederationToken
+// API call, and error handling.
+//
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetFederationTokenRequest method.
// req, resp := client.GetFederationTokenRequest(params)
@@ -932,19 +925,18 @@ const opGetSessionToken = "GetSessionToken"
// GetSessionTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetSessionToken operation. The "output" return
-// value can be used to capture response data after the request's "Send" method
-// is called.
+// value will be populated with the request's response once the request completes
+// successfully.
+//
+// Use "Send" method on the returned Request to send the API call to the service.
+// the "output" return value is not valid until after Send returns without error.
//
-// See GetSessionToken for usage and error information.
+// See GetSessionToken for more information on using the GetSessionToken
+// API call, and error handling.
//
-// Creating a request object using this method should be used when you want to inject
-// custom logic into the request's lifecycle using a custom handler, or if you want to
-// access properties on the request object before or after sending the request. If
-// you just want the service response, call the GetSessionToken method directly
-// instead.
+// This method is useful when you want to inject custom logic or configuration
+// into the SDK's request lifecycle, such as custom headers or retry logic.
//
-// Note: You must call the "Send" method on the returned request object in order
-// to execute the request.
//
// // Example sending a request using the GetSessionTokenRequest method.
// req, resp := client.GetSessionTokenRequest(params)
@@ -1086,7 +1078,7 @@ type AssumeRoleInput struct {
//
// The regex used to validate this parameter is a string of characters consisting
// of upper- and lower-case alphanumeric characters with no spaces. You can
- // also include underscores or any of the following characters: =,.@:\/-
+ // also include underscores or any of the following characters: =,.@:/-
ExternalId *string `min:"2" type:"string"`
// An IAM policy in JSON format.
@@ -2270,9 +2262,9 @@ type GetSessionTokenInput struct {
// You can find the device for an IAM user by going to the AWS Management Console
// and viewing the user's security credentials.
//
- // The regex used to validate this parameter is a string of characters consisting
+ // The regex used to validated this parameter is a string of characters consisting
// of upper- and lower-case alphanumeric characters with no spaces. You can
- // also include underscores or any of the following characters: =,.@-
+ // also include underscores or any of the following characters: =,.@:/-
SerialNumber *string `min:"9" type:"string"`
// The value provided by the MFA device, if MFA is required. If any policy requires
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
new file mode 100644
index 0000000..a43fa80
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go
@@ -0,0 +1,72 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+// Package sts provides the client and types for making API
+// requests to AWS Security Token Service.
+//
+// The AWS Security Token Service (STS) is a web service that enables you to
+// request temporary, limited-privilege credentials for AWS Identity and Access
+// Management (IAM) users or for users that you authenticate (federated users).
+// This guide provides descriptions of the STS API. For more detailed information
+// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+//
+// As an alternative to using the API, you can use one of the AWS SDKs, which
+// consist of libraries and sample code for various programming languages and
+// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
+// way to create programmatic access to STS. For example, the SDKs take care
+// of cryptographically signing requests, managing errors, and retrying requests
+// automatically. For information about the AWS SDKs, including how to download
+// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
+//
+// For information about setting up signatures and authorization through the
+// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
+// in the AWS General Reference. For general information about the Query API,
+// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
+// in Using IAM. For information about using security tokens with other AWS
+// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
+// in the IAM User Guide.
+//
+// If you're new to AWS and need additional technical information about a specific
+// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
+// (http://aws.amazon.com/documentation/).
+//
+// Endpoints
+//
+// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
+// that maps to the US East (N. Virginia) region. Additional regions are available
+// and are activated by default. For more information, see Activating and Deactivating
+// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+// in the IAM User Guide.
+//
+// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
+// in the AWS General Reference.
+//
+// Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on. To learn more about CloudTrail, including how to turn it on and find
+// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
+//
+// See sts package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
+//
+// Using the Client
+//
+// To contact AWS Security Token Service with the SDK use the New function to create
+// a new service client. With that client you can make API requests to the service.
+// These clients are safe to use concurrently.
+//
+// See the SDK's documentation for more information on how to use the SDK.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws.Config documentation for more information on configuring SDK clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Security Token Service client STS for more
+// information on creating client for this service.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
+package sts
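A minimal sketch of creating the STS client described above and making a simple call; the region is a placeholder:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
        svc := sts.New(sess)

        // GetCallerIdentity takes no parameters and is a cheap way to confirm
        // which credentials the client resolved.
        out, err := svc.GetCallerIdentity(&sts.GetCallerIdentityInput{})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("account:", aws.StringValue(out.Account), "arn:", aws.StringValue(out.Arn))
    }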
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
index be21838..1ee5839 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -11,54 +11,12 @@ import (
"github.com/aws/aws-sdk-go/private/protocol/query"
)
-// The AWS Security Token Service (STS) is a web service that enables you to
-// request temporary, limited-privilege credentials for AWS Identity and Access
-// Management (IAM) users or for users that you authenticate (federated users).
-// This guide provides descriptions of the STS API. For more detailed information
-// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
//
-// As an alternative to using the API, you can use one of the AWS SDKs, which
-// consist of libraries and sample code for various programming languages and
-// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
-// way to create programmatic access to STS. For example, the SDKs take care
-// of cryptographically signing requests, managing errors, and retrying requests
-// automatically. For information about the AWS SDKs, including how to download
-// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
-//
-// For information about setting up signatures and authorization through the
-// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
-// in the AWS General Reference. For general information about the Query API,
-// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
-// in Using IAM. For information about using security tokens with other AWS
-// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
-// in the IAM User Guide.
-//
-// If you're new to AWS and need additional technical information about a specific
-// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
-// (http://aws.amazon.com/documentation/).
-//
-// Endpoints
-//
-// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
-// that maps to the US East (N. Virginia) region. Additional regions are available
-// and are activated by default. For more information, see Activating and Deactivating
-// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
-// in the IAM User Guide.
-//
-// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
-// in the AWS General Reference.
-//
-// Recording API requests
-//
-// STS supports AWS CloudTrail, which is a service that records AWS calls for
-// your AWS account and delivers log files to an Amazon S3 bucket. By using
-// information collected by CloudTrail, you can determine what requests were
-// successfully made to STS, who made the request, when it was made, and so
-// on. To learn more about CloudTrail, including how to turn it on and find
-// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
-// The service client's operations are safe to be used concurrently.
-// It is not safe to mutate any of the client's properties though.
-// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
type STS struct {
*client.Client
}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
index c836416..bc52e96 100644
--- a/vendor/github.com/davecgh/go-spew/LICENSE
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -2,7 +2,7 @@ ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
-Permission to use, copy, modify, and distribute this software for any
+Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
index 8a4a658..7f166c3 100644
--- a/vendor/github.com/davecgh/go-spew/spew/bypass.go
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -41,9 +41,9 @@ var (
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
- offsetPtr = uintptr(ptrSize)
+ offsetPtr = ptrSize
offsetScalar = uintptr(0)
- offsetFlag = uintptr(ptrSize * 2)
+ offsetFlag = ptrSize * 2
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
@@ -58,7 +58,7 @@ var (
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
- flagKindShift = uintptr(flagKindWidth - 1)
+ flagKindShift = flagKindWidth - 1
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
)
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
index 7c519ff..1be8ce9 100644
--- a/vendor/github.com/davecgh/go-spew/spew/common.go
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
w.Write(closeParenBytes)
}
-// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
index df1d582..f78d89f 100644
--- a/vendor/github.com/davecgh/go-spew/spew/dump.go
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -35,16 +35,16 @@ var (
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
- cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
- cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
- cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)
// dumpState contains information about the state of a dump operation.
@@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
- case nilFound == true:
+ case nilFound:
d.w.Write(nilAngleBytes)
- case cycleFound == true:
+ case cycleFound:
d.w.Write(circularBytes)
default:
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
index c49875b..b04edb7 100644
--- a/vendor/github.com/davecgh/go-spew/spew/format.go
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
// Display dereferenced value.
switch {
- case nilFound == true:
+ case nilFound:
f.fs.Write(nilAngleBytes)
- case cycleFound == true:
+ case cycleFound:
f.fs.Write(circularShortBytes)
default:
diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE
index 37ec93a..d361bbc 100644
--- a/vendor/github.com/go-ini/ini/LICENSE
+++ b/vendor/github.com/go-ini/ini/LICENSE
@@ -176,7 +176,7 @@ recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
- Copyright [yyyy] [name of copyright owner]
+ Copyright 2014 Unknwon
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md
index 8594742..f4ff27c 100644
--- a/vendor/github.com/go-ini/ini/README.md
+++ b/vendor/github.com/go-ini/ini/README.md
@@ -83,8 +83,8 @@ sec1, err := cfg.GetSection("Section")
sec2, err := cfg.GetSection("SecTIOn")
// key1 and key2 are the exactly same key object
-key1, err := cfg.GetKey("Key")
-key2, err := cfg.GetKey("KeY")
+key1, err := sec1.GetKey("Key")
+key2, err := sec2.GetKey("KeY")
```
#### MySQL-like boolean key
@@ -101,7 +101,7 @@ skip-name-resolve
By default, this is considered as missing value. But if you know you're going to deal with those cases, you can assign advanced load options:
```go
-cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf"))
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
```
The value of those keys is always `true`, and when you save to a file, it will be kept in the same format as you read it.
@@ -122,6 +122,12 @@ Take care that following format will be treated as comment:
If you want to save a value with `#` or `;`, please quote them with ``` ` ``` or ``` """ ```.
+Alternatively, you can use following `LoadOptions` to completely ignore inline comments:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini")
+```
+
### Working with sections
To get a section, you would need to:
@@ -323,6 +329,20 @@ foo = "some value" // foo: some value
bar = 'some value' // bar: some value
```
+Sometimes a file you downloaded from [Crowdin](https://crowdin.com/) has values like the following (the value is surrounded by double quotes, and quotes inside the value are escaped):
+
+```ini
+create_repo="created repository <a href=\"%s\">%s</a>"
+```
+
+How do you transform this to regular format automatically?
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{UnescapeValueDoubleQuotes: true}, "en-US.ini")
+cfg.Section("<name of your section>").Key("create_repo").String()
+// You got: created repository <a href="%s">%s</a>
+```
+
That's all? Hmm, no.
#### Helper methods of working with values
@@ -474,7 +494,7 @@ cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
Sometimes, you have sections that do not contain key-value pairs but raw content; to handle such cases, you can use `LoadOptions.UnparseableSections`:
```go
-cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
+cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`))
body := cfg.Section("COMMENTS").Body()
@@ -567,7 +587,7 @@ Why not?
```go
type Embeded struct {
- Dates []time.Time `delim:"|"`
+ Dates []time.Time `delim:"|" comment:"Time data"`
Places []string `ini:"places,omitempty"`
None []int `ini:",omitempty"`
}
@@ -575,10 +595,10 @@ type Embeded struct {
type Author struct {
Name string `ini:"NAME"`
Male bool
- Age int
+ Age int `comment:"Author's age"`
GPA float64
NeverMind string `ini:"-"`
- *Embeded
+ *Embeded `comment:"Embeded section"`
}
func main() {
@@ -599,10 +619,13 @@ So, what do I get?
```ini
NAME = Unknwon
Male = true
+; Author's age
Age = 21
GPA = 2.8
+; Embeded section
[Embeded]
+; Time data
Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
places = HangZhou,Boston
```
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md
index 163432d..69aefef 100644
--- a/vendor/github.com/go-ini/ini/README_ZH.md
+++ b/vendor/github.com/go-ini/ini/README_ZH.md
@@ -76,8 +76,8 @@ sec1, err := cfg.GetSection("Section")
sec2, err := cfg.GetSection("SecTIOn")
// key1 和 key2 指向同一个键对象
-key1, err := cfg.GetKey("Key")
-key2, err := cfg.GetKey("KeY")
+key1, err := sec1.GetKey("Key")
+key2, err := sec2.GetKey("KeY")
```
#### 类似 MySQL 配置中的布尔值键
@@ -94,7 +94,7 @@ skip-name-resolve
默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理:
```go
-cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf"))
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
```
这些键的值永远为 `true`,且在保存到文件时也只会输出键名。
@@ -115,6 +115,12 @@ key, err := sec.NewBooleanKey("skip-host-cache")
如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。
+除此之外,您还可以通过 `LoadOptions` 完全忽略行内注释:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini")
+```
+
### 操作分区(Section)
获取指定分区:
@@ -316,6 +322,20 @@ foo = "some value" // foo: some value
bar = 'some value' // bar: some value
```
+有时您会获得像从 [Crowdin](https://crowdin.com/) 网站下载的文件那样具有特殊格式的值(值使用双引号括起来,内部的双引号被转义):
+
+```ini
+create_repo="创建了仓库 <a href=\"%s\">%s</a>"
+```
+
+那么,怎么自动地将这类值进行处理呢?
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{UnescapeValueDoubleQuotes: true}, "en-US.ini")
+cfg.Section("<name of your section>").Key("create_repo").String()
+// You got: 创建了仓库 <a href="%s">%s</a>
+```
+
这就是全部了?哈哈,当然不是。
#### 操作键值的辅助方法
@@ -467,7 +487,7 @@ cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理:
```go
-cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
+cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
<1><L.Slide#2> This slide has the fuel listed in the wrong units <e.1>`))
body := cfg.Section("COMMENTS").Body()
@@ -558,7 +578,7 @@ p := &Person{
```go
type Embeded struct {
- Dates []time.Time `delim:"|"`
+ Dates []time.Time `delim:"|" comment:"Time data"`
Places []string `ini:"places,omitempty"`
None []int `ini:",omitempty"`
}
@@ -566,10 +586,10 @@ type Embeded struct {
type Author struct {
Name string `ini:"NAME"`
Male bool
- Age int
+ Age int `comment:"Author's age"`
GPA float64
NeverMind string `ini:"-"`
- *Embeded
+ *Embeded `comment:"Embeded section"`
}
func main() {
@@ -590,10 +610,13 @@ func main() {
```ini
NAME = Unknwon
Male = true
+; Author's age
Age = 21
GPA = 2.8
+; Embeded section
[Embeded]
+; Time data
Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
places = HangZhou,Boston
```
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
index 5211d5a..07dc62c 100644
--- a/vendor/github.com/go-ini/ini/ini.go
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -24,10 +24,8 @@ import (
"os"
"regexp"
"runtime"
- "strconv"
"strings"
"sync"
- "time"
)
const (
@@ -37,7 +35,7 @@ const (
// Maximum allowed depth when recursively substituting variable names.
_DEPTH_VALUES = 99
- _VERSION = "1.27.0"
+ _VERSION = "1.30.0"
)
// Version returns current package version literal.
@@ -60,6 +58,9 @@ var (
// Explicitly write DEFAULT section header
DefaultHeader = false
+
+ // Indicate whether to put a line between sections
+ PrettySection = true
)
func init() {
@@ -180,6 +181,8 @@ type LoadOptions struct {
AllowBooleanKeys bool
// AllowShadows indicates whether to keep track of keys with same name under same section.
AllowShadows bool
+ // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value"
+ UnescapeValueDoubleQuotes bool
// Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
// conform to key/value pairs. Specify the names of those blocks here.
UnparseableSections []string
@@ -395,10 +398,7 @@ func (f *File) Append(source interface{}, others ...interface{}) error {
return f.Reload()
}
-// WriteToIndent writes content into io.Writer with given indention.
-// If PrettyFormat has been set to be true,
-// it will align "=" sign with spaces under each section.
-func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
+func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) {
equalSign := "="
if PrettyFormat {
equalSign = " = "
@@ -411,15 +411,17 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
if len(sec.Comment) > 0 {
if sec.Comment[0] != '#' && sec.Comment[0] != ';' {
sec.Comment = "; " + sec.Comment
+ } else {
+ sec.Comment = sec.Comment[:1] + " " + strings.TrimSpace(sec.Comment[1:])
}
- if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil {
- return 0, err
+ if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil {
+ return nil, err
}
}
if i > 0 || DefaultHeader {
- if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
- return 0, err
+ if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil {
+ return nil, err
}
} else {
// Write nothing if default section is empty
@@ -429,8 +431,8 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
}
if sec.isRawSection {
- if _, err = buf.WriteString(sec.rawBody); err != nil {
- return 0, err
+ if _, err := buf.WriteString(sec.rawBody); err != nil {
+ return nil, err
}
continue
}
@@ -465,9 +467,11 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
}
if key.Comment[0] != '#' && key.Comment[0] != ';' {
key.Comment = "; " + key.Comment
+ } else {
+ key.Comment = key.Comment[:1] + " " + strings.TrimSpace(key.Comment[1:])
}
- if _, err = buf.WriteString(key.Comment + LineBreak); err != nil {
- return 0, err
+ if _, err := buf.WriteString(key.Comment + LineBreak); err != nil {
+ return nil, err
}
}
@@ -485,8 +489,8 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
}
for _, val := range key.ValueWithShadows() {
- if _, err = buf.WriteString(kname); err != nil {
- return 0, err
+ if _, err := buf.WriteString(kname); err != nil {
+ return nil, err
}
if key.isBooleanType {
@@ -504,21 +508,34 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
// In case key value contains "\n", "`", "\"", "#" or ";"
if strings.ContainsAny(val, "\n`") {
val = `"""` + val + `"""`
- } else if strings.ContainsAny(val, "#;") {
+ } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
}
- if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
- return 0, err
+ if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil {
+ return nil, err
}
}
}
- // Put a line between sections
- if _, err = buf.WriteString(LineBreak); err != nil {
- return 0, err
+ if PrettySection {
+ // Put a line between sections
+ if _, err := buf.WriteString(LineBreak); err != nil {
+ return nil, err
+ }
}
}
+ return buf, nil
+}
+
+// WriteToIndent writes content into io.Writer with given indention.
+// If PrettyFormat has been set to be true,
+// it will align "=" sign with spaces under each section.
+func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) {
+ buf, err := f.writeToBuffer(indent)
+ if err != nil {
+ return 0, err
+ }
return buf.WriteTo(w)
}
@@ -531,23 +548,12 @@ func (f *File) WriteTo(w io.Writer) (int64, error) {
func (f *File) SaveToIndent(filename, indent string) error {
// Note: Because we are truncating with os.Create,
// it's safer to save to a temporary file location and rename it when done.
- tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp"
- defer os.Remove(tmpPath)
-
- fw, err := os.Create(tmpPath)
+ buf, err := f.writeToBuffer(indent)
if err != nil {
return err
}
- if _, err = f.WriteToIndent(fw, indent); err != nil {
- fw.Close()
- return err
- }
- fw.Close()
-
- // Remove old file and rename the new one.
- os.Remove(filename)
- return os.Rename(tmpPath, filename)
+ return ioutil.WriteFile(filename, buf.Bytes(), 0666)
}
// SaveTo writes content to file system.
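A small usage sketch of the write path refactored above: WriteTo and SaveTo now both render through the shared writeToBuffer step, and SaveTo persists the buffered bytes in a single write. Section and key names are placeholders:

    package main

    import (
        "log"
        "os"

        "github.com/go-ini/ini"
    )

    func main() {
        cfg := ini.Empty()
        sec, err := cfg.NewSection("server")
        if err != nil {
            log.Fatal(err)
        }
        if _, err := sec.NewKey("listen", "0.0.0.0:8080"); err != nil {
            log.Fatal(err)
        }

        if _, err := cfg.WriteTo(os.Stdout); err != nil { // render to any io.Writer
            log.Fatal(err)
        }
        if err := cfg.SaveTo("app.ini"); err != nil { // buffered, then written to disk
            log.Fatal(err)
        }
    }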
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
index 838356a..0a2aa30 100644
--- a/vendor/github.com/go-ini/ini/key.go
+++ b/vendor/github.com/go-ini/ini/key.go
@@ -15,6 +15,7 @@
package ini
import (
+ "bytes"
"errors"
"fmt"
"strconv"
@@ -444,11 +445,39 @@ func (k *Key) Strings(delim string) []string {
return []string{}
}
- vals := strings.Split(str, delim)
- for i := range vals {
- // vals[i] = k.transformValue(strings.TrimSpace(vals[i]))
- vals[i] = strings.TrimSpace(vals[i])
+ runes := []rune(str)
+ vals := make([]string, 0, 2)
+ var buf bytes.Buffer
+ escape := false
+ idx := 0
+ for {
+ if escape {
+ escape = false
+ if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) {
+ buf.WriteRune('\\')
+ }
+ buf.WriteRune(runes[idx])
+ } else {
+ if runes[idx] == '\\' {
+ escape = true
+ } else if strings.HasPrefix(string(runes[idx:]), delim) {
+ idx += len(delim) - 1
+ vals = append(vals, strings.TrimSpace(buf.String()))
+ buf.Reset()
+ } else {
+ buf.WriteRune(runes[idx])
+ }
+ }
+ idx += 1
+ if idx == len(runes) {
+ break
+ }
}
+
+ if buf.Len() > 0 {
+ vals = append(vals, strings.TrimSpace(buf.String()))
+ }
+
return vals
}
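A sketch of the new escape-aware splitting: a delimiter preceded by a backslash is kept inside the element instead of starting a new one. The file content below is illustrative:

    package main

    import (
        "fmt"
        "log"

        "github.com/go-ini/ini"
    )

    func main() {
        cfg, err := ini.Load([]byte("[people]\nnames = Smith\\, John, Doe\\, Jane\n"))
        if err != nil {
            log.Fatal(err)
        }
        names := cfg.Section("people").Key("names").Strings(",")
        fmt.Printf("%q\n", names) // expected: ["Smith, John" "Doe, Jane"]
    }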
diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go
index 6c0b107..861e366 100644
--- a/vendor/github.com/go-ini/ini/parser.go
+++ b/vendor/github.com/go-ini/ini/parser.go
@@ -189,11 +189,11 @@ func (p *parser) readContinuationLines(val string) (string, error) {
// are quotes \" or \'.
// It returns false if any other parts also contain same kind of quotes.
func hasSurroundedQuote(in string, quote byte) bool {
- return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
+ return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote &&
strings.IndexByte(in[1:], quote) == len(in)-2
}
-func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) {
+func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes bool) (string, error) {
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
if len(line) == 0 {
return "", nil
@@ -204,6 +204,8 @@ func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bo
valQuote = `"""`
} else if line[0] == '`' {
valQuote = "`"
+ } else if unescapeValueDoubleQuotes && line[0] == '"' {
+ valQuote = `"`
}
if len(valQuote) > 0 {
@@ -214,6 +216,9 @@ func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bo
return p.readMultilines(line, line[startIdx:], valQuote)
}
+ if unescapeValueDoubleQuotes && valQuote == `"` {
+ return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil
+ }
return line[startIdx : pos+startIdx], nil
}
@@ -234,7 +239,7 @@ func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bo
}
}
- // Trim single quotes
+ // Trim single and double quotes
if hasSurroundedQuote(line, '\'') ||
hasSurroundedQuote(line, '"') {
line = line[1 : len(line)-1]
@@ -321,7 +326,10 @@ func (f *File) parse(reader io.Reader) (err error) {
if err != nil {
// Treat as boolean key when desired, and whole line is key name.
if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
- kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
+ kname, err := p.readValue(line,
+ f.options.IgnoreContinuation,
+ f.options.IgnoreInlineComment,
+ f.options.UnescapeValueDoubleQuotes)
if err != nil {
return err
}
@@ -344,7 +352,10 @@ func (f *File) parse(reader io.Reader) (err error) {
p.count++
}
- value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
+ value, err := p.readValue(line[offset:],
+ f.options.IgnoreContinuation,
+ f.options.IgnoreInlineComment,
+ f.options.UnescapeValueDoubleQuotes)
if err != nil {
return err
}
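Editor's note: `readValue` now takes an extra `unescapeValueDoubleQuotes` flag, wired to a load option, so a value wrapped in double quotes can have embedded `\"` sequences unescaped. A minimal sketch, assuming the option is exposed as `LoadOptions.UnescapeValueDoubleQuotes` as the field references above suggest:

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

func main() {
	data := []byte(`[core]
editor = "vim \"+set paste\""
`)

	cfg, err := ini.LoadSources(ini.LoadOptions{UnescapeValueDoubleQuotes: true}, data)
	if err != nil {
		log.Fatal(err)
	}

	// With the option enabled, the surrounding quotes are stripped and the
	// inner \" sequences are unescaped.
	fmt.Println(cfg.Section("core").Key("editor").String())
	// vim "+set paste"
}
```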
diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go
index 031c78b..e0bff67 100644
--- a/vendor/github.com/go-ini/ini/struct.go
+++ b/vendor/github.com/go-ini/ini/struct.go
@@ -78,7 +78,7 @@ func parseDelim(actual string) string {
var reflectTime = reflect.TypeOf(time.Now()).Kind()
// setSliceWithProperType sets proper values to slice based on its type.
-func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
var strs []string
if allowShadow {
strs = key.StringsWithShadows(delim)
@@ -92,26 +92,30 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowSh
}
var vals interface{}
+ var err error
sliceOf := field.Type().Elem().Kind()
switch sliceOf {
case reflect.String:
vals = strs
case reflect.Int:
- vals, _ = key.parseInts(strs, true, false)
+ vals, err = key.parseInts(strs, true, false)
case reflect.Int64:
- vals, _ = key.parseInt64s(strs, true, false)
+ vals, err = key.parseInt64s(strs, true, false)
case reflect.Uint:
- vals, _ = key.parseUints(strs, true, false)
+ vals, err = key.parseUints(strs, true, false)
case reflect.Uint64:
- vals, _ = key.parseUint64s(strs, true, false)
+ vals, err = key.parseUint64s(strs, true, false)
case reflect.Float64:
- vals, _ = key.parseFloat64s(strs, true, false)
+ vals, err = key.parseFloat64s(strs, true, false)
case reflectTime:
- vals, _ = key.parseTimesFormat(time.RFC3339, strs, true, false)
+ vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false)
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
+ if err != nil && isStrict {
+ return err
+ }
slice := reflect.MakeSlice(field.Type(), numVals, numVals)
for i := 0; i < numVals; i++ {
@@ -136,10 +140,17 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowSh
return nil
}
+func wrapStrictError(err error, isStrict bool) error {
+ if isStrict {
+ return err
+ }
+ return nil
+}
+
// setWithProperType sets a proper value to the field based on its type,
// but in non-strict mode it does not return an error for a failed parse,
// because we want to use the default value that is already assigned to the struct.
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error {
switch t.Kind() {
case reflect.String:
if len(key.String()) == 0 {
@@ -149,7 +160,7 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
case reflect.Bool:
boolVal, err := key.Bool()
if err != nil {
- return nil
+ return wrapStrictError(err, isStrict)
}
field.SetBool(boolVal)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
@@ -161,8 +172,8 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
}
intVal, err := key.Int64()
- if err != nil || intVal == 0 {
- return nil
+ if err != nil {
+ return wrapStrictError(err, isStrict)
}
field.SetInt(intVal)
// byte is an alias for uint8, so supporting uint8 breaks support for byte
@@ -176,24 +187,24 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
uintVal, err := key.Uint64()
if err != nil {
- return nil
+ return wrapStrictError(err, isStrict)
}
field.SetUint(uintVal)
case reflect.Float32, reflect.Float64:
floatVal, err := key.Float64()
if err != nil {
- return nil
+ return wrapStrictError(err, isStrict)
}
field.SetFloat(floatVal)
case reflectTime:
timeVal, err := key.Time()
if err != nil {
- return nil
+ return wrapStrictError(err, isStrict)
}
field.Set(reflect.ValueOf(timeVal))
case reflect.Slice:
- return setSliceWithProperType(key, field, delim, allowShadow)
+ return setSliceWithProperType(key, field, delim, allowShadow, isStrict)
default:
return fmt.Errorf("unsupported type '%s'", t)
}
@@ -212,7 +223,7 @@ func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bo
return rawName, omitEmpty, allowShadow
}
-func (s *Section) mapTo(val reflect.Value) error {
+func (s *Section) mapTo(val reflect.Value, isStrict bool) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
}
@@ -241,7 +252,7 @@ func (s *Section) mapTo(val reflect.Value) error {
if isAnonymous || isStruct {
if sec, err := s.f.GetSection(fieldName); err == nil {
- if err = sec.mapTo(field); err != nil {
+ if err = sec.mapTo(field, isStrict); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
}
continue
@@ -250,7 +261,7 @@ func (s *Section) mapTo(val reflect.Value) error {
if key, err := s.GetKey(fieldName); err == nil {
delim := parseDelim(tpField.Tag.Get("delim"))
- if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
+ if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
}
}
@@ -269,7 +280,22 @@ func (s *Section) MapTo(v interface{}) error {
return errors.New("cannot map to non-pointer struct")
}
- return s.mapTo(val)
+ return s.mapTo(val, false)
+}
+
+// StrictMapTo maps the section to the given struct in strict mode,
+// which returns all possible errors, including value parsing errors.
+func (s *Section) StrictMapTo(v interface{}) error {
+ typ := reflect.TypeOf(v)
+ val := reflect.ValueOf(v)
+ if typ.Kind() == reflect.Ptr {
+ typ = typ.Elem()
+ val = val.Elem()
+ } else {
+ return errors.New("cannot map to non-pointer struct")
+ }
+
+ return s.mapTo(val, true)
}
// MapTo maps file to given struct.
@@ -277,6 +303,12 @@ func (f *File) MapTo(v interface{}) error {
return f.Section("").MapTo(v)
}
+// StrictMapTo maps the file to the given struct in strict mode,
+// which returns all possible errors, including value parsing errors.
+func (f *File) StrictMapTo(v interface{}) error {
+ return f.Section("").StrictMapTo(v)
+}
+
// MapTo maps data sources to given struct with name mapper.
func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
cfg, err := Load(source, others...)
@@ -287,11 +319,28 @@ func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, other
return cfg.MapTo(v)
}
+// StrictMapToWithMapper maps data sources to the given struct with a name mapper in strict mode,
+// which returns all possible errors, including value parsing errors.
+func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
+ cfg, err := Load(source, others...)
+ if err != nil {
+ return err
+ }
+ cfg.NameMapper = mapper
+ return cfg.StrictMapTo(v)
+}
+
// MapTo maps data sources to given struct.
func MapTo(v, source interface{}, others ...interface{}) error {
return MapToWithMapper(v, nil, source, others...)
}
+// StrictMapTo maps data sources to the given struct in strict mode,
+// which returns all possible errors, including value parsing errors.
+func StrictMapTo(v, source interface{}, others ...interface{}) error {
+ return StrictMapToWithMapper(v, nil, source, others...)
+}
+
// reflectSliceWithProperType does the opposite thing as setSliceWithProperType.
func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error {
slice := field.Slice(0, field.Len())
@@ -359,10 +408,11 @@ func isEmptyValue(v reflect.Value) bool {
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
- case reflectTime:
- return v.Interface().(time.Time).IsZero()
case reflect.Interface, reflect.Ptr:
return v.IsNil()
+ case reflectTime:
+ t, ok := v.Interface().(time.Time)
+ return ok && t.IsZero()
}
return false
}
@@ -400,6 +450,12 @@ func (s *Section) reflectFrom(val reflect.Value) error {
// Note: fieldName can never be empty here, ignore error.
sec, _ = s.f.NewSection(fieldName)
}
+
+ // Add comment from comment tag
+ if len(sec.Comment) == 0 {
+ sec.Comment = tpField.Tag.Get("comment")
+ }
+
if err = sec.reflectFrom(field); err != nil {
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
}
@@ -411,6 +467,12 @@ func (s *Section) reflectFrom(val reflect.Value) error {
if err != nil {
key, _ = s.NewKey(fieldName, "")
}
+
+ // Add comment from comment tag
+ if len(key.Comment) == 0 {
+ key.Comment = tpField.Tag.Get("comment")
+ }
+
if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
return fmt.Errorf("error reflecting field (%s): %v", fieldName, err)
}
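Editor's note: the `isStrict` plumbing above surfaces parse errors that `MapTo` used to swallow, with `StrictMapTo` as the entry point that turns it on. A minimal sketch of the difference (struct and key names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

type limits struct {
	MaxConns int `ini:"max_conns"`
}

func main() {
	src := []byte("max_conns = not-a-number\n")

	var lenient limits
	// MapTo keeps the zero value and reports no error on a bad int.
	fmt.Println(ini.MapTo(&lenient, src), lenient.MaxConns) // <nil> 0

	var strict limits
	// StrictMapTo propagates the parse error instead.
	fmt.Println(ini.StrictMapTo(&strict, src))
}
```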
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
index e47e542..ac36be9 100644
--- a/vendor/github.com/go-sql-driver/mysql/AUTHORS
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -12,38 +12,52 @@
# Individual Persons
Aaron Hopkins <go-sql-driver at die.net>
+Achille Roussel <achille.roussel at gmail.com>
Arne Hormann <arnehormann at gmail.com>
+Asta Xie <xiemengjun at gmail.com>
+Bulat Gaifullin <gaifullinbf at gmail.com>
Carlos Nieto <jose.carlos at menteslibres.net>
Chris Moos <chris at tech9computers.com>
Daniel Nichter <nil at codenode.com>
Daniël van Eeden <git at myname.nl>
+Dave Protasowski <dprotaso at gmail.com>
DisposaBoy <disposaboy at dby.me>
Egor Smolyakov <egorsmkv at gmail.com>
+Evan Shaw <evan at vendhq.com>
Frederick Mayle <frederickmayle at gmail.com>
Gustavo Kristic <gkristic at gmail.com>
Hanno Braun <mail at hannobraun.com>
Henri Yandell <flamefew at gmail.com>
Hirotaka Yamamoto <ymmt2005 at gmail.com>
+ICHINOSE Shogo <shogo82148 at gmail.com>
INADA Naoki <songofacandy at gmail.com>
Jacek Szwec <szwec.jacek at gmail.com>
James Harr <james.harr at gmail.com>
+Jeff Hodges <jeff at somethingsimilar.com>
+Jeffrey Charles <jeffreycharles at gmail.com>
Jian Zhen <zhenjl at gmail.com>
Joshua Prunier <joshua.prunier at gmail.com>
Julien Lefevre <julien.lefevr at gmail.com>
Julien Schmidt <go-sql-driver at julienschmidt.com>
+Justin Nuß <nuss.justin at gmail.com>
Kamil Dziedzic <kamil at klecza.pl>
Kevin Malachowski <kevin at chowski.com>
Lennart Rudolph <lrudolph at hmc.edu>
Leonardo YongUk Kim <dalinaum at gmail.com>
+Lion Yang <lion at aosc.xyz>
Luca Looz <luca.looz92 at gmail.com>
Lucas Liu <extrafliu at gmail.com>
Luke Scott <luke at webconnex.com>
+Maciej Zimnoch <maciej.zimnoch@codilime.com>
Michael Woolnough <michael.woolnough at gmail.com>
Nicola Peduzzi <thenikso at gmail.com>
Olivier Mengué <dolmen at cpan.org>
+oscarzhao <oscarzhaosl at gmail.com>
Paul Bonser <misterpib at gmail.com>
Peter Schultz <peter.schultz at classmarkets.com>
+Rebecca Chin <rchin at pivotal.io>
Runrioter Wung <runrioter at gmail.com>
+Shuode Li <elemount at qq.com>
Soroush Pour <me at soroushjp.com>
Stan Putrya <root.vagner at gmail.com>
Stanley Gunawan <gunawan.stanley at gmail.com>
@@ -56,4 +70,6 @@ Zhenye Xie <xiezhenye at gmail.com>
Barracuda Networks, Inc.
Google Inc.
+Keybase Inc.
+Pivotal Inc.
Stripe Inc.
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
index a060e3c..d24aaa0 100644
--- a/vendor/github.com/go-sql-driver/mysql/README.md
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -16,6 +16,8 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
* [Parameters](#parameters)
* [Examples](#examples)
* [Connection pool and timeouts](#connection-pool-and-timeouts)
+ * [context.Context Support](#contextcontext-support)
+ * [ColumnType Support](#columntype-support)
* [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
* [time.Time support](#timetime-support)
* [Unicode support](#unicode-support)
@@ -38,7 +40,7 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
* Optional placeholder interpolation
## Requirements
- * Go 1.2 or higher
+ * Go 1.5 or higher
* MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
---------------------------------------
@@ -46,7 +48,7 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
## Installation
Simply install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell:
```bash
-$ go get github.com/go-sql-driver/mysql
+$ go get -u github.com/go-sql-driver/mysql
```
Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`.
@@ -100,7 +102,8 @@ See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which netw
In general you should use a Unix domain socket if available and TCP otherwise for best performance.
#### Address
-For TCP and UDP networks, addresses have the form `host:port`.
+For TCP and UDP networks, addresses have the form `host[:port]`.
+If `port` is omitted, the default port will be used.
If `host` is a literal IPv6 address, it must be enclosed in square brackets.
The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form.
@@ -137,9 +140,9 @@ Default: false
```
Type: bool
Valid Values: true, false
-Default: false
+Default: true
```
-`allowNativePasswords=true` allows the usage of the mysql native password method.
+`allowNativePasswords=false` disallows the usage of the MySQL native password method.
##### `allowOldPasswords`
@@ -230,10 +233,10 @@ Please keep in mind, that param values must be [url.QueryEscape](https://golang.
##### `maxAllowedPacket`
```
Type: decimal number
-Default: 0
+Default: 4194304
```
-Max packet size allowed in bytes. Use `maxAllowedPacket=0` to automatically fetch the `max_allowed_packet` variable from server.
+Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from the server *on every connection*.
##### `multiStatements`
@@ -267,7 +270,7 @@ Default: 0
I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
-##### `strict`
+##### `rejectReadOnly`
```
Type: bool
@@ -275,11 +278,27 @@ Valid Values: true, false
Default: false
```
-`strict=true` enables a driver-side strict mode in which MySQL warnings are treated as errors. This mode should not be used in production as it may lead to data corruption in certain situations.
-A server-side strict mode, which is safe for production use, can be set via the [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html) system variable.
+`rejectReadOnly=true` causes the driver to reject read-only connections. This
+guards against a possible race condition during an automatic failover, where the
+MySQL client gets connected to a read-only replica after the failover.
+
+Note that this should be a fairly rare case, as an automatic failover normally
+happens when the primary is down, and the race condition shouldn't happen
+unless it comes back up online as soon as the failover is kicked off. On the
+other hand, when this happens, a MySQL application can get stuck on a
+read-only connection until restarted. It is however fairly easy to reproduce,
+for example, using a manual failover on AWS Aurora's MySQL-compatible cluster.
+
+If you are not relying on read-only transactions to reject writes that aren't
+supposed to happen, setting this on some MySQL providers (such as AWS Aurora)
+is safer for failovers.
+
+Note that ERROR 1290 can be returned for a `read-only` server and this option will
+cause a retry for that error. However, the same error number is used for some
+other cases. When enabling this option, you should ensure your application never
+causes an ERROR 1290 except for `read-only` mode.
-By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes.
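Editor's note: with `strict` removed and `rejectReadOnly` taking over the failover use case, a minimal sketch of a DSN using the new option (host, credentials, database and query are placeholders):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// rejectReadOnly=true makes the driver close connections that answer with
	// ERROR 1290/1792 (read-only node) and return driver.ErrBadConn, so
	// database/sql retries the statement on a fresh connection.
	// allowNativePasswords and maxAllowedPacket now default to true and 4 MiB,
	// so they only need to be set when deviating from those defaults.
	dsn := "user:password@tcp(aurora-cluster.example.com:3306)/appdb?rejectReadOnly=true"

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if _, err := db.Exec("UPDATE jobs SET state = 'done' WHERE id = 1"); err != nil {
		log.Fatal(err)
	}
}
```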
##### `timeout`
@@ -290,6 +309,7 @@ Default: OS default
Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+
##### `tls`
```
@@ -300,6 +320,7 @@ Default: false
`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
+
##### `writeTimeout`
```
@@ -318,9 +339,9 @@ Any other parameters are interpreted as system variables:
* `<string_var>=%27<value>%27`: `SET <string_var>='<value>'`
Rules:
-* The values for string variables must be quoted with '
+* The values for string variables must be quoted with `'`.
* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed!
- (which implies values of string variables must be wrapped with `%27`)
+ (which implies values of string variables must be wrapped with `%27`).
Examples:
* `autocommit=1`: `SET autocommit=1`
@@ -385,6 +406,13 @@ user:password@/
### Connection pool and timeouts
The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
+### `ColumnType` Support
+This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported.
+
+### `context.Context` Support
+Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
+See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
+
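Editor's note: a minimal sketch of the context and `ColumnType` support described above, using a per-query timeout (table, columns and DSN are illustrative):

```go
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/appdb")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The query is canceled if it runs longer than two seconds; the driver's
	// watcher goroutine then tears down the underlying connection.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	rows, err := db.QueryContext(ctx, "SELECT id, created_at FROM users")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	// ColumnType metadata (everything except Length) is exposed via database/sql.
	cols, err := rows.ColumnTypes()
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range cols {
		log.Println(c.Name(), c.DatabaseTypeName(), c.ScanType())
	}
}
```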
### `LOAD DATA LOCAL INFILE` support
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
@@ -400,7 +428,7 @@ See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/my
### `time.Time` support
-The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your programm.
+The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program.
However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter.
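Editor's note: a minimal sketch of scanning `DATETIME` into `time.Time` with `parseTime` and `loc`, as the paragraph above describes (table, column and DSN are illustrative):

```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// parseTime=true makes the driver return time.Time for DATE/DATETIME,
	// interpreted in the location given by loc.
	dsn := "user:password@tcp(127.0.0.1:3306)/appdb?parseTime=true&loc=Local"

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var createdAt time.Time
	if err := db.QueryRow("SELECT created_at FROM users WHERE id = 1").Scan(&createdAt); err != nil {
		log.Fatal(err)
	}
	log.Println(createdAt)
}
```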
@@ -418,7 +446,6 @@ Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAM
See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
-
## Testing / Development
To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
@@ -437,13 +464,13 @@ Mozilla summarizes the license scope as follows:
That means:
- * You can **use** the **unchanged** source code both in private and commercially
- * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0)
- * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**
+ * You can **use** the **unchanged** source code both in private and commercially.
+ * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0).
+ * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**.
Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license.
-You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE)
+You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE).
![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow")
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
index 08e5fad..e570614 100644
--- a/vendor/github.com/go-sql-driver/mysql/connection.go
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -17,6 +17,16 @@ import (
"time"
)
+// a copy of context.Context for Go 1.7 and earlier
+type mysqlContext interface {
+ Done() <-chan struct{}
+ Err() error
+
+ // defined in context.Context, but not used in this driver:
+ // Deadline() (deadline time.Time, ok bool)
+ // Value(key interface{}) interface{}
+}
+
type mysqlConn struct {
buf buffer
netConn net.Conn
@@ -30,7 +40,14 @@ type mysqlConn struct {
status statusFlag
sequence uint8
parseTime bool
- strict bool
+
+ // for context support (Go 1.8+)
+ watching bool
+ watcher chan<- mysqlContext
+ closech chan struct{}
+ finished chan<- struct{}
+ canceled atomicError // set non-nil if conn is canceled
+ closed atomicBool // set when conn is closed, before closech is closed
}
// Handles parameters set in DSN after the connection is established
@@ -63,22 +80,41 @@ func (mc *mysqlConn) handleParams() (err error) {
return
}
+func (mc *mysqlConn) markBadConn(err error) error {
+ if mc == nil {
+ return err
+ }
+ if err != errBadConnNoWrite {
+ return err
+ }
+ return driver.ErrBadConn
+}
+
func (mc *mysqlConn) Begin() (driver.Tx, error) {
- if mc.netConn == nil {
+ return mc.begin(false)
+}
+
+func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
- err := mc.exec("START TRANSACTION")
+ var q string
+ if readOnly {
+ q = "START TRANSACTION READ ONLY"
+ } else {
+ q = "START TRANSACTION"
+ }
+ err := mc.exec(q)
if err == nil {
return &mysqlTx{mc}, err
}
-
- return nil, err
+ return nil, mc.markBadConn(err)
}
func (mc *mysqlConn) Close() (err error) {
// Makes Close idempotent
- if mc.netConn != nil {
+ if !mc.closed.IsSet() {
err = mc.writeCommandPacket(comQuit)
}
@@ -92,26 +128,39 @@ func (mc *mysqlConn) Close() (err error) {
// is called before auth or on auth failure because MySQL will have already
// closed the network connection.
func (mc *mysqlConn) cleanup() {
+ if !mc.closed.TrySet(true) {
+ return
+ }
+
// Makes cleanup idempotent
- if mc.netConn != nil {
- if err := mc.netConn.Close(); err != nil {
- errLog.Print(err)
+ close(mc.closech)
+ if mc.netConn == nil {
+ return
+ }
+ if err := mc.netConn.Close(); err != nil {
+ errLog.Print(err)
+ }
+}
+
+func (mc *mysqlConn) error() error {
+ if mc.closed.IsSet() {
+ if err := mc.canceled.Value(); err != nil {
+ return err
}
- mc.netConn = nil
+ return ErrInvalidConn
}
- mc.cfg = nil
- mc.buf.nc = nil
+ return nil
}
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
- if mc.netConn == nil {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := mc.writeCommandPacketStr(comStmtPrepare, query)
if err != nil {
- return nil, err
+ return nil, mc.markBadConn(err)
}
stmt := &mysqlStmt{
@@ -145,7 +194,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
if buf == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return "", driver.ErrBadConn
+ return "", ErrInvalidConn
}
buf = buf[:0]
argPos := 0
@@ -258,7 +307,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
}
func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
- if mc.netConn == nil {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
@@ -272,7 +321,6 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
return nil, err
}
query = prepared
- args = nil
}
mc.affectedRows = 0
mc.insertId = 0
@@ -284,14 +332,14 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
insertId: int64(mc.insertId),
}, err
}
- return nil, err
+ return nil, mc.markBadConn(err)
}
// Internal function to execute commands
func (mc *mysqlConn) exec(query string) error {
// Send command
if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
- return err
+ return mc.markBadConn(err)
}
// Read Result
@@ -316,7 +364,11 @@ func (mc *mysqlConn) exec(query string) error {
}
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
- if mc.netConn == nil {
+ return mc.query(query, args)
+}
+
+func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
+ if mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
@@ -330,7 +382,6 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
return nil, err
}
query = prepared
- args = nil
}
// Send command
err := mc.writeCommandPacketStr(comQuery, query)
@@ -352,12 +403,13 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
return nil, err
}
}
+
// Columns
rows.rs.columns, err = mc.readColumns(resLen)
return rows, err
}
}
- return nil, err
+ return nil, mc.markBadConn(err)
}
// Gets the value of the given MySQL System Variable
@@ -389,3 +441,21 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
}
return nil, err
}
+
+// cancel is called when the query is canceled.
+func (mc *mysqlConn) cancel(err error) {
+ mc.canceled.Set(err)
+ mc.cleanup()
+}
+
+// finish is called when the query has succeeded.
+func (mc *mysqlConn) finish() {
+ if !mc.watching || mc.finished == nil {
+ return
+ }
+ select {
+ case mc.finished <- struct{}{}:
+ mc.watching = false
+ case <-mc.closech:
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/connection_go18.go b/vendor/github.com/go-sql-driver/mysql/connection_go18.go
new file mode 100644
index 0000000..48a9cca
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/connection_go18.go
@@ -0,0 +1,197 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "context"
+ "database/sql"
+ "database/sql/driver"
+)
+
+// Ping implements driver.Pinger interface
+func (mc *mysqlConn) Ping(ctx context.Context) error {
+ if mc.closed.IsSet() {
+ errLog.Print(ErrInvalidConn)
+ return driver.ErrBadConn
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return err
+ }
+ defer mc.finish()
+
+ if err := mc.writeCommandPacket(comPing); err != nil {
+ return err
+ }
+ if _, err := mc.readResultOK(); err != nil {
+ return err
+ }
+
+ return nil
+}
+
+// BeginTx implements driver.ConnBeginTx interface
+func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
+ level, err := mapIsolationLevel(opts.Isolation)
+ if err != nil {
+ return nil, err
+ }
+ err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return mc.begin(opts.ReadOnly)
+}
+
+func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := mc.query(query, dargs)
+ if err != nil {
+ mc.finish()
+ return nil, err
+ }
+ rows.finish = mc.finish
+ return rows, err
+}
+
+func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer mc.finish()
+
+ return mc.Exec(query, dargs)
+}
+
+func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
+ if err := mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ stmt, err := mc.Prepare(query)
+ mc.finish()
+ if err != nil {
+ return nil, err
+ }
+
+ select {
+ default:
+ case <-ctx.Done():
+ stmt.Close()
+ return nil, ctx.Err()
+ }
+ return stmt, nil
+}
+
+func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+
+ rows, err := stmt.query(dargs)
+ if err != nil {
+ stmt.mc.finish()
+ return nil, err
+ }
+ rows.finish = stmt.mc.finish
+ return rows, err
+}
+
+func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
+ dargs, err := namedValueToValue(args)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := stmt.mc.watchCancel(ctx); err != nil {
+ return nil, err
+ }
+ defer stmt.mc.finish()
+
+ return stmt.Exec(dargs)
+}
+
+func (mc *mysqlConn) watchCancel(ctx context.Context) error {
+ if mc.watching {
+ // Reach here if canceled,
+ // so the connection is already invalid
+ mc.cleanup()
+ return nil
+ }
+ if ctx.Done() == nil {
+ return nil
+ }
+
+ mc.watching = true
+ select {
+ default:
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ if mc.watcher == nil {
+ return nil
+ }
+
+ mc.watcher <- ctx
+
+ return nil
+}
+
+func (mc *mysqlConn) startWatcher() {
+ watcher := make(chan mysqlContext, 1)
+ mc.watcher = watcher
+ finished := make(chan struct{})
+ mc.finished = finished
+ go func() {
+ for {
+ var ctx mysqlContext
+ select {
+ case ctx = <-watcher:
+ case <-mc.closech:
+ return
+ }
+
+ select {
+ case <-ctx.Done():
+ mc.cancel(ctx.Err())
+ case <-finished:
+ case <-mc.closech:
+ return
+ }
+ }
+ }()
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/const.go b/vendor/github.com/go-sql-driver/mysql/const.go
index 88cfff3..4a19ca5 100644
--- a/vendor/github.com/go-sql-driver/mysql/const.go
+++ b/vendor/github.com/go-sql-driver/mysql/const.go
@@ -9,7 +9,8 @@
package mysql
const (
- minProtocolVersion byte = 10
+ defaultMaxAllowedPacket = 4 << 20 // 4 MiB
+ minProtocolVersion = 10
maxPacketSize = 1<<24 - 1
timeFormat = "2006-01-02 15:04:05.999999"
)
@@ -87,8 +88,10 @@ const (
)
// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType
+type fieldType byte
+
const (
- fieldTypeDecimal byte = iota
+ fieldTypeDecimal fieldType = iota
fieldTypeTiny
fieldTypeShort
fieldTypeLong
@@ -107,7 +110,7 @@ const (
fieldTypeBit
)
const (
- fieldTypeJSON byte = iota + 0xf5
+ fieldTypeJSON fieldType = iota + 0xf5
fieldTypeNewDecimal
fieldTypeEnum
fieldTypeSet
diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go
index 0022d1f..d42ce7a 100644
--- a/vendor/github.com/go-sql-driver/mysql/driver.go
+++ b/vendor/github.com/go-sql-driver/mysql/driver.go
@@ -4,7 +4,7 @@
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
// You can obtain one at http://mozilla.org/MPL/2.0/.
-// Package mysql provides a MySQL driver for Go's database/sql package
+// Package mysql provides a MySQL driver for Go's database/sql package.
//
// The driver should be used via the database/sql package:
//
@@ -22,6 +22,11 @@ import (
"net"
)
+// watcher interface is used for context support (From Go 1.8)
+type watcher interface {
+ startWatcher()
+}
+
// MySQLDriver is exported to make the driver directly accessible.
// In general the driver is used via the database/sql package.
type MySQLDriver struct{}
@@ -52,13 +57,13 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
mc := &mysqlConn{
maxAllowedPacket: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
+ closech: make(chan struct{}),
}
mc.cfg, err = ParseDSN(dsn)
if err != nil {
return nil, err
}
mc.parseTime = mc.cfg.ParseTime
- mc.strict = mc.cfg.Strict
// Connect to Server
if dial, ok := dials[mc.cfg.Net]; ok {
@@ -81,6 +86,11 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
}
}
+ // Call startWatcher for context support (From Go 1.8)
+ if s, ok := interface{}(mc).(watcher); ok {
+ s.startWatcher()
+ }
+
mc.buf = newBuffer(mc.netConn)
// Set I/O timeouts
diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go
index ac00dce..3ade963 100644
--- a/vendor/github.com/go-sql-driver/mysql/dsn.go
+++ b/vendor/github.com/go-sql-driver/mysql/dsn.go
@@ -15,6 +15,7 @@ import (
"fmt"
"net"
"net/url"
+ "sort"
"strconv"
"strings"
"time"
@@ -27,7 +28,9 @@ var (
errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations")
)
-// Config is a configuration parsed from a DSN string
+// Config is a configuration parsed from a DSN string.
+// If a new Config is created instead of being parsed from a DSN string,
+// the NewConfig function should be used, which sets default values.
type Config struct {
User string // Username
Passwd string // Password (requires User)
@@ -53,7 +56,45 @@ type Config struct {
InterpolateParams bool // Interpolate placeholders into query string
MultiStatements bool // Allow multiple statements in one query
ParseTime bool // Parse time values to time.Time
- Strict bool // Return warnings as errors
+ RejectReadOnly bool // Reject read-only connections
+}
+
+// NewConfig creates a new Config and sets default values.
+func NewConfig() *Config {
+ return &Config{
+ Collation: defaultCollation,
+ Loc: time.UTC,
+ MaxAllowedPacket: defaultMaxAllowedPacket,
+ AllowNativePasswords: true,
+ }
+}
+
+func (cfg *Config) normalize() error {
+ if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
+ return errInvalidDSNUnsafeCollation
+ }
+
+ // Set default network if empty
+ if cfg.Net == "" {
+ cfg.Net = "tcp"
+ }
+
+ // Set default address if empty
+ if cfg.Addr == "" {
+ switch cfg.Net {
+ case "tcp":
+ cfg.Addr = "127.0.0.1:3306"
+ case "unix":
+ cfg.Addr = "/tmp/mysql.sock"
+ default:
+ return errors.New("default addr for network '" + cfg.Net + "' unknown")
+ }
+
+ } else if cfg.Net == "tcp" {
+ cfg.Addr = ensureHavePort(cfg.Addr)
+ }
+
+ return nil
}
// FormatDSN formats the given Config into a DSN string which can be passed to
@@ -102,12 +143,12 @@ func (cfg *Config) FormatDSN() string {
}
}
- if cfg.AllowNativePasswords {
+ if !cfg.AllowNativePasswords {
if hasParam {
- buf.WriteString("&allowNativePasswords=true")
+ buf.WriteString("&allowNativePasswords=false")
} else {
hasParam = true
- buf.WriteString("?allowNativePasswords=true")
+ buf.WriteString("?allowNativePasswords=false")
}
}
@@ -195,12 +236,12 @@ func (cfg *Config) FormatDSN() string {
buf.WriteString(cfg.ReadTimeout.String())
}
- if cfg.Strict {
+ if cfg.RejectReadOnly {
if hasParam {
- buf.WriteString("&strict=true")
+ buf.WriteString("&rejectReadOnly=true")
} else {
hasParam = true
- buf.WriteString("?strict=true")
+ buf.WriteString("?rejectReadOnly=true")
}
}
@@ -234,7 +275,7 @@ func (cfg *Config) FormatDSN() string {
buf.WriteString(cfg.WriteTimeout.String())
}
- if cfg.MaxAllowedPacket > 0 {
+ if cfg.MaxAllowedPacket != defaultMaxAllowedPacket {
if hasParam {
buf.WriteString("&maxAllowedPacket=")
} else {
@@ -247,7 +288,12 @@ func (cfg *Config) FormatDSN() string {
// other params
if cfg.Params != nil {
- for param, value := range cfg.Params {
+ var params []string
+ for param := range cfg.Params {
+ params = append(params, param)
+ }
+ sort.Strings(params)
+ for _, param := range params {
if hasParam {
buf.WriteByte('&')
} else {
@@ -257,7 +303,7 @@ func (cfg *Config) FormatDSN() string {
buf.WriteString(param)
buf.WriteByte('=')
- buf.WriteString(url.QueryEscape(value))
+ buf.WriteString(url.QueryEscape(cfg.Params[param]))
}
}
@@ -267,10 +313,7 @@ func (cfg *Config) FormatDSN() string {
// ParseDSN parses the DSN string to a Config
func ParseDSN(dsn string) (cfg *Config, err error) {
// New config with some default values
- cfg = &Config{
- Loc: time.UTC,
- Collation: defaultCollation,
- }
+ cfg = NewConfig()
// [user[:password]@][net[(addr)]]/dbname[?param1=value1&paramN=valueN]
// Find the last '/' (since the password or the net addr might contain a '/')
@@ -338,28 +381,9 @@ func ParseDSN(dsn string) (cfg *Config, err error) {
return nil, errInvalidDSNNoSlash
}
- if cfg.InterpolateParams && unsafeCollations[cfg.Collation] {
- return nil, errInvalidDSNUnsafeCollation
+ if err = cfg.normalize(); err != nil {
+ return nil, err
}
-
- // Set default network if empty
- if cfg.Net == "" {
- cfg.Net = "tcp"
- }
-
- // Set default address if empty
- if cfg.Addr == "" {
- switch cfg.Net {
- case "tcp":
- cfg.Addr = "127.0.0.1:3306"
- case "unix":
- cfg.Addr = "/tmp/mysql.sock"
- default:
- return nil, errors.New("default addr for network '" + cfg.Net + "' unknown")
- }
-
- }
-
return
}
@@ -472,14 +496,18 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return
}
- // Strict mode
- case "strict":
+ // Reject read-only connections
+ case "rejectReadOnly":
var isBool bool
- cfg.Strict, isBool = readBool(value)
+ cfg.RejectReadOnly, isBool = readBool(value)
if !isBool {
return errors.New("invalid bool value: " + value)
}
+ // Strict mode
+ case "strict":
+ panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode")
+
// Dial Timeout
case "timeout":
cfg.Timeout, err = time.ParseDuration(value)
@@ -494,6 +522,10 @@ func parseDSNParams(cfg *Config, params string) (err error) {
if boolValue {
cfg.TLSConfig = "true"
cfg.tls = &tls.Config{}
+ host, _, err := net.SplitHostPort(cfg.Addr)
+ if err == nil {
+ cfg.tls.ServerName = host
+ }
} else {
cfg.TLSConfig = "false"
}
@@ -506,7 +538,7 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return fmt.Errorf("invalid value for TLS config name: %v", err)
}
- if tlsConfig, ok := tlsConfigRegister[name]; ok {
+ if tlsConfig := getTLSConfigClone(name); tlsConfig != nil {
if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
host, _, err := net.SplitHostPort(cfg.Addr)
if err == nil {
@@ -546,3 +578,10 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return
}
+
+func ensureHavePort(addr string) string {
+ if _, _, err := net.SplitHostPort(addr); err != nil {
+ return net.JoinHostPort(addr, "3306")
+ }
+ return addr
+}
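Editor's note: `NewConfig` and `normalize` centralize the defaults that `ParseDSN` previously set inline. A minimal sketch of building a DSN programmatically with them (field values are placeholders, and the printed DSN is what the new `FormatDSN` defaults would suggest):

```go
package main

import (
	"fmt"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// NewConfig pre-fills Collation, Loc, MaxAllowedPacket and
	// AllowNativePasswords; FormatDSN only emits non-default values.
	cfg := mysql.NewConfig()
	cfg.User = "user"
	cfg.Passwd = "password"
	cfg.Net = "tcp"
	cfg.Addr = "127.0.0.1:3306"
	cfg.DBName = "appdb"
	cfg.RejectReadOnly = true

	fmt.Println(cfg.FormatDSN())
	// user:password@tcp(127.0.0.1:3306)/appdb?rejectReadOnly=true
}
```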
diff --git a/vendor/github.com/go-sql-driver/mysql/errors.go b/vendor/github.com/go-sql-driver/mysql/errors.go
index 857854e..760782f 100644
--- a/vendor/github.com/go-sql-driver/mysql/errors.go
+++ b/vendor/github.com/go-sql-driver/mysql/errors.go
@@ -9,10 +9,8 @@
package mysql
import (
- "database/sql/driver"
"errors"
"fmt"
- "io"
"log"
"os"
)
@@ -31,6 +29,12 @@ var (
ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?")
ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server")
ErrBusyBuffer = errors.New("busy buffer")
+
+ // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet.
+ // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn
+ // to trigger a resend.
+ // See https://github.com/go-sql-driver/mysql/pull/302
+ errBadConnNoWrite = errors.New("bad connection")
)
var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile))
@@ -59,74 +63,3 @@ type MySQLError struct {
func (me *MySQLError) Error() string {
return fmt.Sprintf("Error %d: %s", me.Number, me.Message)
}
-
-// MySQLWarnings is an error type which represents a group of one or more MySQL
-// warnings
-type MySQLWarnings []MySQLWarning
-
-func (mws MySQLWarnings) Error() string {
- var msg string
- for i, warning := range mws {
- if i > 0 {
- msg += "\r\n"
- }
- msg += fmt.Sprintf(
- "%s %s: %s",
- warning.Level,
- warning.Code,
- warning.Message,
- )
- }
- return msg
-}
-
-// MySQLWarning is an error type which represents a single MySQL warning.
-// Warnings are returned in groups only. See MySQLWarnings
-type MySQLWarning struct {
- Level string
- Code string
- Message string
-}
-
-func (mc *mysqlConn) getWarnings() (err error) {
- rows, err := mc.Query("SHOW WARNINGS", nil)
- if err != nil {
- return
- }
-
- var warnings = MySQLWarnings{}
- var values = make([]driver.Value, 3)
-
- for {
- err = rows.Next(values)
- switch err {
- case nil:
- warning := MySQLWarning{}
-
- if raw, ok := values[0].([]byte); ok {
- warning.Level = string(raw)
- } else {
- warning.Level = fmt.Sprintf("%s", values[0])
- }
- if raw, ok := values[1].([]byte); ok {
- warning.Code = string(raw)
- } else {
- warning.Code = fmt.Sprintf("%s", values[1])
- }
- if raw, ok := values[2].([]byte); ok {
- warning.Message = string(raw)
- } else {
- warning.Message = fmt.Sprintf("%s", values[0])
- }
-
- warnings = append(warnings, warning)
-
- case io.EOF:
- return warnings
-
- default:
- rows.Close()
- return
- }
- }
-}
diff --git a/vendor/github.com/go-sql-driver/mysql/fields.go b/vendor/github.com/go-sql-driver/mysql/fields.go
new file mode 100644
index 0000000..cded986
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/fields.go
@@ -0,0 +1,140 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+package mysql
+
+import (
+ "database/sql"
+ "reflect"
+)
+
+var typeDatabaseName = map[fieldType]string{
+ fieldTypeBit: "BIT",
+ fieldTypeBLOB: "BLOB",
+ fieldTypeDate: "DATE",
+ fieldTypeDateTime: "DATETIME",
+ fieldTypeDecimal: "DECIMAL",
+ fieldTypeDouble: "DOUBLE",
+ fieldTypeEnum: "ENUM",
+ fieldTypeFloat: "FLOAT",
+ fieldTypeGeometry: "GEOMETRY",
+ fieldTypeInt24: "MEDIUMINT",
+ fieldTypeJSON: "JSON",
+ fieldTypeLong: "INT",
+ fieldTypeLongBLOB: "LONGBLOB",
+ fieldTypeLongLong: "BIGINT",
+ fieldTypeMediumBLOB: "MEDIUMBLOB",
+ fieldTypeNewDate: "DATE",
+ fieldTypeNewDecimal: "DECIMAL",
+ fieldTypeNULL: "NULL",
+ fieldTypeSet: "SET",
+ fieldTypeShort: "SMALLINT",
+ fieldTypeString: "CHAR",
+ fieldTypeTime: "TIME",
+ fieldTypeTimestamp: "TIMESTAMP",
+ fieldTypeTiny: "TINYINT",
+ fieldTypeTinyBLOB: "TINYBLOB",
+ fieldTypeVarChar: "VARCHAR",
+ fieldTypeVarString: "VARCHAR",
+ fieldTypeYear: "YEAR",
+}
+
+var (
+ scanTypeFloat32 = reflect.TypeOf(float32(0))
+ scanTypeFloat64 = reflect.TypeOf(float64(0))
+ scanTypeInt8 = reflect.TypeOf(int8(0))
+ scanTypeInt16 = reflect.TypeOf(int16(0))
+ scanTypeInt32 = reflect.TypeOf(int32(0))
+ scanTypeInt64 = reflect.TypeOf(int64(0))
+ scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{})
+ scanTypeNullInt = reflect.TypeOf(sql.NullInt64{})
+ scanTypeNullTime = reflect.TypeOf(NullTime{})
+ scanTypeUint8 = reflect.TypeOf(uint8(0))
+ scanTypeUint16 = reflect.TypeOf(uint16(0))
+ scanTypeUint32 = reflect.TypeOf(uint32(0))
+ scanTypeUint64 = reflect.TypeOf(uint64(0))
+ scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{})
+ scanTypeUnknown = reflect.TypeOf(new(interface{}))
+)
+
+type mysqlField struct {
+ tableName string
+ name string
+ length uint32
+ flags fieldFlag
+ fieldType fieldType
+ decimals byte
+}
+
+func (mf *mysqlField) scanType() reflect.Type {
+ switch mf.fieldType {
+ case fieldTypeTiny:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint8
+ }
+ return scanTypeInt8
+ }
+ return scanTypeNullInt
+
+ case fieldTypeShort, fieldTypeYear:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint16
+ }
+ return scanTypeInt16
+ }
+ return scanTypeNullInt
+
+ case fieldTypeInt24, fieldTypeLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint32
+ }
+ return scanTypeInt32
+ }
+ return scanTypeNullInt
+
+ case fieldTypeLongLong:
+ if mf.flags&flagNotNULL != 0 {
+ if mf.flags&flagUnsigned != 0 {
+ return scanTypeUint64
+ }
+ return scanTypeInt64
+ }
+ return scanTypeNullInt
+
+ case fieldTypeFloat:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat32
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDouble:
+ if mf.flags&flagNotNULL != 0 {
+ return scanTypeFloat64
+ }
+ return scanTypeNullFloat
+
+ case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar,
+ fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB,
+ fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB,
+ fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON,
+ fieldTypeTime:
+ return scanTypeRawBytes
+
+ case fieldTypeDate, fieldTypeNewDate,
+ fieldTypeTimestamp, fieldTypeDateTime:
+ // NullTime is always returned for more consistent behavior as it can
+ // handle both cases of parseTime regardless of whether the field is nullable.
+ return scanTypeNullTime
+
+ default:
+ return scanTypeUnknown
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/infile.go b/vendor/github.com/go-sql-driver/mysql/infile.go
index 547357c..4020f91 100644
--- a/vendor/github.com/go-sql-driver/mysql/infile.go
+++ b/vendor/github.com/go-sql-driver/mysql/infile.go
@@ -147,7 +147,8 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
}
// send content packets
- if err == nil {
+ // if packetSize == 0, the Reader contains no data
+ if err == nil && packetSize > 0 {
data := make([]byte, 4+packetSize)
var n int
for err == nil {
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
index 41b4d3d..f63d250 100644
--- a/vendor/github.com/go-sql-driver/mysql/packets.go
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -30,9 +30,12 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// read packet header
data, err := mc.buf.readNext(4)
if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
errLog.Print(err)
mc.Close()
- return nil, driver.ErrBadConn
+ return nil, ErrInvalidConn
}
// packet length [24 bit]
@@ -54,7 +57,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
if prevData == nil {
errLog.Print(ErrMalformPkt)
mc.Close()
- return nil, driver.ErrBadConn
+ return nil, ErrInvalidConn
}
return prevData, nil
@@ -63,9 +66,12 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
// read packet body [pktLen bytes]
data, err = mc.buf.readNext(pktLen)
if err != nil {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return nil, cerr
+ }
errLog.Print(err)
mc.Close()
- return nil, driver.ErrBadConn
+ return nil, ErrInvalidConn
}
// return data if this was the last packet
@@ -125,11 +131,20 @@ func (mc *mysqlConn) writePacket(data []byte) error {
// Handle error
if err == nil { // n != len(data)
+ mc.cleanup()
errLog.Print(ErrMalformPkt)
} else {
+ if cerr := mc.canceled.Value(); cerr != nil {
+ return cerr
+ }
+ if n == 0 && pktLen == len(data)-4 {
+ // only for the first loop iteration when nothing was written yet
+ return errBadConnNoWrite
+ }
+ mc.cleanup()
errLog.Print(err)
}
- return driver.ErrBadConn
+ return ErrInvalidConn
}
}
@@ -263,7 +278,7 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// ClientFlags [32 bit]
@@ -341,7 +356,9 @@ func (mc *mysqlConn) writeAuthPacket(cipher []byte) error {
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error {
// User password
- scrambleBuff := scrambleOldPassword(cipher, []byte(mc.cfg.Passwd))
+ // https://dev.mysql.com/doc/internals/en/old-password-authentication.html
+ // Old password authentication only needs (and will only ever need) an 8-byte challenge.
+ scrambleBuff := scrambleOldPassword(cipher[:8], []byte(mc.cfg.Passwd))
// Calculate the packet length and add a tailing 0
pktLen := len(scrambleBuff) + 1
@@ -349,7 +366,7 @@ func (mc *mysqlConn) writeOldAuthPacket(cipher []byte) error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add the scrambled password [null terminated string]
@@ -368,7 +385,7 @@ func (mc *mysqlConn) writeClearAuthPacket() error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add the clear password [null terminated string]
@@ -381,7 +398,9 @@ func (mc *mysqlConn) writeClearAuthPacket() error {
// Native password authentication method
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
func (mc *mysqlConn) writeNativeAuthPacket(cipher []byte) error {
- scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd))
+ // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html
+ // Native password authentication only needs (and will only ever need) a 20-byte challenge.
+ scrambleBuff := scramblePassword(cipher[0:20], []byte(mc.cfg.Passwd))
// Calculate the packet length and add a tailing 0
pktLen := len(scrambleBuff)
@@ -389,7 +408,7 @@ func (mc *mysqlConn) writeNativeAuthPacket(cipher []byte) error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add the scramble
@@ -410,7 +429,7 @@ func (mc *mysqlConn) writeCommandPacket(command byte) error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add command byte
@@ -429,7 +448,7 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add command byte
@@ -450,7 +469,7 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// Add command byte
@@ -484,25 +503,26 @@ func (mc *mysqlConn) readResultOK() ([]byte, error) {
if len(data) > 1 {
pluginEndIndex := bytes.IndexByte(data, 0x00)
plugin := string(data[1:pluginEndIndex])
- cipher := data[pluginEndIndex+1 : len(data)-1]
+ cipher := data[pluginEndIndex+1:]
- if plugin == "mysql_old_password" {
+ switch plugin {
+ case "mysql_old_password":
// using old_passwords
return cipher, ErrOldPassword
- } else if plugin == "mysql_clear_password" {
+ case "mysql_clear_password":
// using clear text password
return cipher, ErrCleartextPassword
- } else if plugin == "mysql_native_password" {
+ case "mysql_native_password":
// using mysql default authentication method
return cipher, ErrNativePassword
- } else {
+ default:
return cipher, ErrUnknownPlugin
}
- } else {
- // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
- return nil, ErrOldPassword
}
+ // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest
+ return nil, ErrOldPassword
+
default: // Error otherwise
return nil, mc.handleErrorPacket(data)
}
@@ -550,6 +570,22 @@ func (mc *mysqlConn) handleErrorPacket(data []byte) error {
// Error Number [16 bit uint]
errno := binary.LittleEndian.Uint16(data[1:3])
+ // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION
+ // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover)
+ if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly {
+ // Oops; we are connected to a read-only connection, and won't be able
+ // to issue any write statements. Since RejectReadOnly is configured,
+ // we throw away this connection, hoping the next one will have write
+ // permission. This is specifically for a possible race condition
+ // during failover (e.g. on AWS Aurora). See README.md for more.
+ //
+ // We explicitly close the connection before returning
+ // driver.ErrBadConn to ensure that `database/sql` purges this
+ // connection and initiates a new one for the next statement.
+ mc.Close()
+ return driver.ErrBadConn
+ }
+
pos := 3
// SQL State [optional: # + 5bytes string]
@@ -589,14 +625,7 @@ func (mc *mysqlConn) handleOkPacket(data []byte) error {
}
// warning count [2 bytes]
- if !mc.strict {
- return nil
- }
- pos := 1 + n + m + 2
- if binary.LittleEndian.Uint16(data[pos:pos+2]) > 0 {
- return mc.getWarnings()
- }
return nil
}
@@ -671,11 +700,14 @@ func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
// Filler [uint8]
// Charset [charset, collation uint8]
+ pos += n + 1 + 2
+
// Length [uint32]
- pos += n + 1 + 2 + 4
+ columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4])
+ pos += 4
// Field type [uint8]
- columns[i].fieldType = data[pos]
+ columns[i].fieldType = fieldType(data[pos])
pos++
// Flags [uint16]
@@ -808,14 +840,7 @@ func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
// Reserved [8 bit]
// Warning count [16 bit uint]
- if !stmt.mc.strict {
- return columnCount, nil
- }
- // Check for warnings count > 0, only available in MySQL > 4.1
- if len(data) >= 12 && binary.LittleEndian.Uint16(data[10:12]) > 0 {
- return columnCount, stmt.mc.getWarnings()
- }
return columnCount, nil
}
return 0, err
@@ -900,7 +925,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
- return driver.ErrBadConn
+ return errBadConnNoWrite
}
// command [1 byte]
@@ -959,7 +984,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// build NULL-bitmap
if arg == nil {
nullMask[i/8] |= 1 << (uint(i) & 7)
- paramTypes[i+i] = fieldTypeNULL
+ paramTypes[i+i] = byte(fieldTypeNULL)
paramTypes[i+i+1] = 0x00
continue
}
@@ -967,7 +992,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// cache types and values
switch v := arg.(type) {
case int64:
- paramTypes[i+i] = fieldTypeLongLong
+ paramTypes[i+i] = byte(fieldTypeLongLong)
paramTypes[i+i+1] = 0x00
if cap(paramValues)-len(paramValues)-8 >= 0 {
@@ -983,7 +1008,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
case float64:
- paramTypes[i+i] = fieldTypeDouble
+ paramTypes[i+i] = byte(fieldTypeDouble)
paramTypes[i+i+1] = 0x00
if cap(paramValues)-len(paramValues)-8 >= 0 {
@@ -999,7 +1024,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
case bool:
- paramTypes[i+i] = fieldTypeTiny
+ paramTypes[i+i] = byte(fieldTypeTiny)
paramTypes[i+i+1] = 0x00
if v {
@@ -1011,7 +1036,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
case []byte:
// Common case (non-nil value) first
if v != nil {
- paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
@@ -1029,11 +1054,11 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
// Handle []byte(nil) as a NULL value
nullMask[i/8] |= 1 << (uint(i) & 7)
- paramTypes[i+i] = fieldTypeNULL
+ paramTypes[i+i] = byte(fieldTypeNULL)
paramTypes[i+i+1] = 0x00
case string:
- paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
@@ -1048,20 +1073,22 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
}
case time.Time:
- paramTypes[i+i] = fieldTypeString
+ paramTypes[i+i] = byte(fieldTypeString)
paramTypes[i+i+1] = 0x00
- var val []byte
+ var a [64]byte
+ var b = a[:0]
+
if v.IsZero() {
- val = []byte("0000-00-00")
+ b = append(b, "0000-00-00"...)
} else {
- val = []byte(v.In(mc.cfg.Loc).Format(timeFormat))
+ b = v.In(mc.cfg.Loc).AppendFormat(b, timeFormat)
}
paramValues = appendLengthEncodedInteger(paramValues,
- uint64(len(val)),
+ uint64(len(b)),
)
- paramValues = append(paramValues, val...)
+ paramValues = append(paramValues, b...)
default:
return fmt.Errorf("can not convert type: %T", arg)
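
The time.Time branch above now formats into a small stack buffer with AppendFormat instead of allocating an intermediate string via Format. A minimal standalone sketch of that standard-library pattern; the layout string is illustrative and only approximates the driver's timeFormat constant.

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        var a [64]byte
        b := a[:0] // zero-length slice reusing a's backing array

        // AppendFormat appends the formatted time to b, so no intermediate
        // string is allocated, mirroring the change in writeExecutePacket.
        b = time.Now().UTC().AppendFormat(b, "2006-01-02 15:04:05.999999")
        fmt.Println(string(b))
    }
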
@@ -1120,10 +1147,11 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
}
return io.EOF
}
+ mc := rows.mc
rows.mc = nil
// Error otherwise
- return rows.mc.handleErrorPacket(data)
+ return mc.handleErrorPacket(data)
}
// NULL-bitmap, [(column-count + 7 + 2) / 8 bytes]
@@ -1187,7 +1215,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeFloat:
- dest[i] = float32(math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4])))
+ dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4]))
pos += 4
continue
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
index 900f548..18f4169 100644
--- a/vendor/github.com/go-sql-driver/mysql/rows.go
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -11,34 +11,24 @@ package mysql
import (
"database/sql/driver"
"io"
+ "math"
+ "reflect"
)
-type mysqlField struct {
- tableName string
- name string
- flags fieldFlag
- fieldType byte
- decimals byte
-}
-
type resultSet struct {
- columns []mysqlField
- done bool
+ columns []mysqlField
+ columnNames []string
+ done bool
}
type mysqlRows struct {
- mc *mysqlConn
- rs resultSet
+ mc *mysqlConn
+ rs resultSet
+ finish func()
}
type binaryRows struct {
mysqlRows
- // stmtCols is a pointer to the statement's cached columns for different
- // result sets.
- stmtCols *[][]mysqlField
- // i is a number of the current result set. It is used to fetch proper
- // columns from stmtCols.
- i int
}
type textRows struct {
@@ -46,6 +36,10 @@ type textRows struct {
}
func (rows *mysqlRows) Columns() []string {
+ if rows.rs.columnNames != nil {
+ return rows.rs.columnNames
+ }
+
columns := make([]string, len(rows.rs.columns))
if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
for i := range columns {
@@ -60,16 +54,64 @@ func (rows *mysqlRows) Columns() []string {
columns[i] = rows.rs.columns[i].name
}
}
+
+ rows.rs.columnNames = columns
return columns
}
+func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string {
+ if name, ok := typeDatabaseName[rows.rs.columns[i].fieldType]; ok {
+ return name
+ }
+ return ""
+}
+
+// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) {
+// return int64(rows.rs.columns[i].length), true
+// }
+
+func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) {
+ return rows.rs.columns[i].flags&flagNotNULL == 0, true
+}
+
+func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) {
+ column := rows.rs.columns[i]
+ decimals := int64(column.decimals)
+
+ switch column.fieldType {
+ case fieldTypeDecimal, fieldTypeNewDecimal:
+ if decimals > 0 {
+ return int64(column.length) - 2, decimals, true
+ }
+ return int64(column.length) - 1, decimals, true
+ case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime:
+ return decimals, decimals, true
+ case fieldTypeFloat, fieldTypeDouble:
+ if decimals == 0x1f {
+ return math.MaxInt64, math.MaxInt64, true
+ }
+ return math.MaxInt64, decimals, true
+ }
+
+ return 0, 0, false
+}
+
+func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type {
+ return rows.rs.columns[i].scanType()
+}
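
The ColumnType* methods added above implement the optional driver interfaces that database/sql gained in Go 1.8. A hedged caller-side sketch of how they surface through (*sql.Rows).ColumnTypes(); the DSN, query, and column names are placeholders.

    package main

    import (
        "database/sql"
        "fmt"
        "log"

        _ "github.com/go-sql-driver/mysql"
    )

    func main() {
        db, err := sql.Open("mysql", "user:pass@/test")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        rows, err := db.Query("SELECT id, price, created_at FROM products")
        if err != nil {
            log.Fatal(err)
        }
        defer rows.Close()

        cts, err := rows.ColumnTypes()
        if err != nil {
            log.Fatal(err)
        }
        for _, ct := range cts {
            nullable, ok := ct.Nullable()          // backed by ColumnTypeNullable
            prec, scale, hasPS := ct.DecimalSize() // backed by ColumnTypePrecisionScale
            fmt.Println(ct.Name(), ct.DatabaseTypeName(), ct.ScanType(),
                nullable, ok, prec, scale, hasPS)
        }
    }
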
+
func (rows *mysqlRows) Close() (err error) {
+ if f := rows.finish; f != nil {
+ f()
+ rows.finish = nil
+ }
+
mc := rows.mc
if mc == nil {
return nil
}
- if mc.netConn == nil {
- return ErrInvalidConn
+ if err := mc.error(); err != nil {
+ return err
}
// Remove unread packets from stream
@@ -97,8 +139,8 @@ func (rows *mysqlRows) nextResultSet() (int, error) {
if rows.mc == nil {
return 0, io.EOF
}
- if rows.mc.netConn == nil {
- return 0, ErrInvalidConn
+ if err := rows.mc.error(); err != nil {
+ return 0, err
}
// Remove unread packets from stream
@@ -132,31 +174,20 @@ func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
}
}
-func (rows *binaryRows) NextResultSet() (err error) {
+func (rows *binaryRows) NextResultSet() error {
resLen, err := rows.nextNotEmptyResultSet()
if err != nil {
return err
}
- // get columns, if not cached, read them and cache them.
- if rows.i >= len(*rows.stmtCols) {
- rows.rs.columns, err = rows.mc.readColumns(resLen)
- *rows.stmtCols = append(*rows.stmtCols, rows.rs.columns)
- } else {
- rows.rs.columns = (*rows.stmtCols)[rows.i]
- if err := rows.mc.readUntilEOF(); err != nil {
- return err
- }
- }
-
- rows.i++
- return nil
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
}
func (rows *binaryRows) Next(dest []driver.Value) error {
if mc := rows.mc; mc != nil {
- if mc.netConn == nil {
- return ErrInvalidConn
+ if err := mc.error(); err != nil {
+ return err
}
// Fetch next row from stream
@@ -177,8 +208,8 @@ func (rows *textRows) NextResultSet() (err error) {
func (rows *textRows) Next(dest []driver.Value) error {
if mc := rows.mc; mc != nil {
- if mc.netConn == nil {
- return ErrInvalidConn
+ if err := mc.error(); err != nil {
+ return err
}
// Fetch next row from stream
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
index b887716..ae22350 100644
--- a/vendor/github.com/go-sql-driver/mysql/statement.go
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -20,11 +20,10 @@ type mysqlStmt struct {
mc *mysqlConn
id uint32
paramCount int
- columns [][]mysqlField // cached from the first query
}
func (stmt *mysqlStmt) Close() error {
- if stmt.mc == nil || stmt.mc.netConn == nil {
+ if stmt.mc == nil || stmt.mc.closed.IsSet() {
// driver.Stmt.Close can be called more than once, thus this function
// has to be idempotent.
// See also Issue #450 and golang/go#16019.
@@ -46,14 +45,14 @@ func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
}
func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
- if stmt.mc.netConn == nil {
+ if stmt.mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := stmt.writeExecutePacket(args)
if err != nil {
- return nil, err
+ return nil, stmt.mc.markBadConn(err)
}
mc := stmt.mc
@@ -90,14 +89,18 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
}
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
- if stmt.mc.netConn == nil {
+ return stmt.query(args)
+}
+
+func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
+ if stmt.mc.closed.IsSet() {
errLog.Print(ErrInvalidConn)
return nil, driver.ErrBadConn
}
// Send command
err := stmt.writeExecutePacket(args)
if err != nil {
- return nil, err
+ return nil, stmt.mc.markBadConn(err)
}
mc := stmt.mc
@@ -109,20 +112,10 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
}
rows := new(binaryRows)
- rows.stmtCols = &stmt.columns
if resLen > 0 {
rows.mc = mc
- rows.i++
- // Columns
- // If not cached, read them and cache them
- if len(stmt.columns) == 0 {
- rows.rs.columns, err = mc.readColumns(resLen)
- stmt.columns = append(stmt.columns, rows.rs.columns)
- } else {
- rows.rs.columns = stmt.columns[0]
- err = mc.readUntilEOF()
- }
+ rows.rs.columns, err = mc.readColumns(resLen)
} else {
rows.rs.done = true
diff --git a/vendor/github.com/go-sql-driver/mysql/transaction.go b/vendor/github.com/go-sql-driver/mysql/transaction.go
index 33c749b..417d727 100644
--- a/vendor/github.com/go-sql-driver/mysql/transaction.go
+++ b/vendor/github.com/go-sql-driver/mysql/transaction.go
@@ -13,7 +13,7 @@ type mysqlTx struct {
}
func (tx *mysqlTx) Commit() (err error) {
- if tx.mc == nil || tx.mc.netConn == nil {
+ if tx.mc == nil || tx.mc.closed.IsSet() {
return ErrInvalidConn
}
err = tx.mc.exec("COMMIT")
@@ -22,7 +22,7 @@ func (tx *mysqlTx) Commit() (err error) {
}
func (tx *mysqlTx) Rollback() (err error) {
- if tx.mc == nil || tx.mc.netConn == nil {
+ if tx.mc == nil || tx.mc.closed.IsSet() {
return ErrInvalidConn
}
err = tx.mc.exec("ROLLBACK")
diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go
index d523b7f..82da830 100644
--- a/vendor/github.com/go-sql-driver/mysql/utils.go
+++ b/vendor/github.com/go-sql-driver/mysql/utils.go
@@ -16,16 +16,21 @@ import (
"fmt"
"io"
"strings"
+ "sync"
+ "sync/atomic"
"time"
)
var (
+ tlsConfigLock sync.RWMutex
tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs
)
// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open.
// Use the key as a value in the DSN where tls=value.
//
+// Note: The tls.Config provided needs to be exclusively owned by the driver after registering.
+//
// rootCertPool := x509.NewCertPool()
// pem, err := ioutil.ReadFile("/path/ca-cert.pem")
// if err != nil {
@@ -51,19 +56,32 @@ func RegisterTLSConfig(key string, config *tls.Config) error {
return fmt.Errorf("key '%s' is reserved", key)
}
+ tlsConfigLock.Lock()
if tlsConfigRegister == nil {
tlsConfigRegister = make(map[string]*tls.Config)
}
tlsConfigRegister[key] = config
+ tlsConfigLock.Unlock()
return nil
}
// DeregisterTLSConfig removes the tls.Config associated with key.
func DeregisterTLSConfig(key string) {
+ tlsConfigLock.Lock()
if tlsConfigRegister != nil {
delete(tlsConfigRegister, key)
}
+ tlsConfigLock.Unlock()
+}
+
+func getTLSConfigClone(key string) (config *tls.Config) {
+ tlsConfigLock.RLock()
+ if v, ok := tlsConfigRegister[key]; ok {
+ config = cloneTLSConfig(v)
+ }
+ tlsConfigLock.RUnlock()
+ return
}
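
Access to tlsConfigRegister is now serialized with tlsConfigLock, and getTLSConfigClone hands out clones so a registered config stays exclusively owned by the driver, as the note above requires. A hedged end-to-end sketch of the public API involved; the key name, certificate path, and DSN are placeholders.

    package main

    import (
        "crypto/tls"
        "crypto/x509"
        "database/sql"
        "io/ioutil"
        "log"

        "github.com/go-sql-driver/mysql"
    )

    func main() {
        pool := x509.NewCertPool()
        pem, err := ioutil.ReadFile("/path/ca-cert.pem")
        if err != nil {
            log.Fatal(err)
        }
        if !pool.AppendCertsFromPEM(pem) {
            log.Fatal("failed to append CA certificate")
        }

        // Register once; do not mutate the config afterwards.
        if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: pool}); err != nil {
            log.Fatal(err)
        }

        // tls=custom refers back to the key registered above.
        db, err := sql.Open("mysql", "user:pass@tcp(db:3306)/app?tls=custom")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
    }
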
// Returns the bool value of the input.
@@ -738,3 +756,67 @@ func escapeStringQuotes(buf []byte, v string) []byte {
return buf[:pos]
}
+
+/******************************************************************************
+* Sync utils *
+******************************************************************************/
+
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://github.com/golang/go/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct{}
+
+// Lock is a no-op used by -copylocks checker from `go vet`.
+func (*noCopy) Lock() {}
+
+// atomicBool is a wrapper around uint32 for usage as a boolean value with
+// atomic access.
+type atomicBool struct {
+ _noCopy noCopy
+ value uint32
+}
+
+// IsSet returns whether the current boolean value is true
+func (ab *atomicBool) IsSet() bool {
+ return atomic.LoadUint32(&ab.value) > 0
+}
+
+// Set sets the value of the bool regardless of the previous value
+func (ab *atomicBool) Set(value bool) {
+ if value {
+ atomic.StoreUint32(&ab.value, 1)
+ } else {
+ atomic.StoreUint32(&ab.value, 0)
+ }
+}
+
+// TrySet sets the value of the bool and returns whether the value changed
+func (ab *atomicBool) TrySet(value bool) bool {
+ if value {
+ return atomic.SwapUint32(&ab.value, 1) == 0
+ }
+ return atomic.SwapUint32(&ab.value, 0) > 0
+}
+
+// atomicError is a wrapper for atomically accessed error values
+type atomicError struct {
+ _noCopy noCopy
+ value atomic.Value
+}
+
+// Set sets the error value regardless of the previous value.
+// The value must not be nil
+func (ae *atomicError) Set(value error) {
+ ae.value.Store(value)
+}
+
+// Value returns the current error value
+func (ae *atomicError) Value() error {
+ if v := ae.value.Load(); v != nil {
+ // this will panic if the value doesn't implement the error interface
+ return v.(error)
+ }
+ return nil
+}
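
These sync helpers back the mc.closed.IsSet() checks introduced earlier in this diff (statement.go, transaction.go), and atomicError plausibly backs the mc.error() calls seen in rows.go. A standalone sketch with simplified stand-ins for the same helpers (noCopy omitted); none of the names below are driver code.

    package main

    import (
        "errors"
        "fmt"
        "sync/atomic"
    )

    type atomicBool struct{ v uint32 }

    func (b *atomicBool) IsSet() bool { return atomic.LoadUint32(&b.v) > 0 }

    func (b *atomicBool) TrySet(val bool) bool {
        if val {
            return atomic.SwapUint32(&b.v, 1) == 0
        }
        return atomic.SwapUint32(&b.v, 0) > 0
    }

    type atomicError struct{ v atomic.Value }

    func (e *atomicError) Set(err error) { e.v.Store(err) }

    func (e *atomicError) Value() error {
        if v := e.v.Load(); v != nil {
            return v.(error)
        }
        return nil
    }

    type conn struct {
        closed   atomicBool
        closeErr atomicError
    }

    func (c *conn) Close() {
        if c.closed.TrySet(true) { // only the first Close records the sticky error
            c.closeErr.Set(errors.New("connection closed"))
        }
    }

    func (c *conn) Exec(query string) error {
        if c.closed.IsSet() { // race-free replacement for the old netConn == nil test
            return c.closeErr.Value()
        }
        return nil // a real driver would write the command packet here
    }

    func main() {
        c := &conn{}
        fmt.Println(c.Exec("SELECT 1")) // <nil>
        c.Close()
        c.Close() // idempotent
        fmt.Println(c.Exec("SELECT 1")) // connection closed
    }
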
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go17.go b/vendor/github.com/go-sql-driver/mysql/utils_go17.go
new file mode 100644
index 0000000..f595634
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_go17.go
@@ -0,0 +1,40 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.7
+// +build !go1.8
+
+package mysql
+
+import "crypto/tls"
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return &tls.Config{
+ Rand: c.Rand,
+ Time: c.Time,
+ Certificates: c.Certificates,
+ NameToCertificate: c.NameToCertificate,
+ GetCertificate: c.GetCertificate,
+ RootCAs: c.RootCAs,
+ NextProtos: c.NextProtos,
+ ServerName: c.ServerName,
+ ClientAuth: c.ClientAuth,
+ ClientCAs: c.ClientCAs,
+ InsecureSkipVerify: c.InsecureSkipVerify,
+ CipherSuites: c.CipherSuites,
+ PreferServerCipherSuites: c.PreferServerCipherSuites,
+ SessionTicketsDisabled: c.SessionTicketsDisabled,
+ SessionTicketKey: c.SessionTicketKey,
+ ClientSessionCache: c.ClientSessionCache,
+ MinVersion: c.MinVersion,
+ MaxVersion: c.MaxVersion,
+ CurvePreferences: c.CurvePreferences,
+ DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
+ Renegotiation: c.Renegotiation,
+ }
+}
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go18.go b/vendor/github.com/go-sql-driver/mysql/utils_go18.go
new file mode 100644
index 0000000..7d8c9b1
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_go18.go
@@ -0,0 +1,49 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build go1.8
+
+package mysql
+
+import (
+ "crypto/tls"
+ "database/sql"
+ "database/sql/driver"
+ "errors"
+)
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ return c.Clone()
+}
+
+func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
+ dargs := make([]driver.Value, len(named))
+ for n, param := range named {
+ if len(param.Name) > 0 {
+ // TODO: support the use of Named Parameters #561
+ return nil, errors.New("mysql: driver does not support the use of Named Parameters")
+ }
+ dargs[n] = param.Value
+ }
+ return dargs, nil
+}
+
+func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
+ switch sql.IsolationLevel(level) {
+ case sql.LevelRepeatableRead:
+ return "REPEATABLE READ", nil
+ case sql.LevelReadCommitted:
+ return "READ COMMITTED", nil
+ case sql.LevelReadUncommitted:
+ return "READ UNCOMMITTED", nil
+ case sql.LevelSerializable:
+ return "SERIALIZABLE", nil
+ default:
+ return "", errors.New("mysql: unsupported isolation level: " + string(level))
+ }
+}
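
namedValueToValue and mapIsolationLevel support the Go 1.8 driver interfaces for named parameters and transaction options. A hedged caller-side sketch of the path that ends in mapIsolationLevel; the DSN and statement are placeholders.

    package main

    import (
        "context"
        "database/sql"
        "log"

        _ "github.com/go-sql-driver/mysql"
    )

    func main() {
        db, err := sql.Open("mysql", "user:pass@/test")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        // sql.LevelSerializable is translated by the driver to "SERIALIZABLE"
        // before the transaction is started.
        tx, err := db.BeginTx(context.Background(), &sql.TxOptions{
            Isolation: sql.LevelSerializable,
        })
        if err != nil {
            log.Fatal(err)
        }
        defer tx.Rollback() // returns ErrTxDone after a successful Commit; safe to ignore here

        if _, err := tx.Exec("UPDATE t SET n = n + 1 WHERE id = ?", 1); err != nil {
            log.Fatal(err)
        }
        if err := tx.Commit(); err != nil {
            log.Fatal(err)
        }
    }
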
diff --git a/vendor/github.com/go-sql-driver/mysql/utils_legacy.go b/vendor/github.com/go-sql-driver/mysql/utils_legacy.go
new file mode 100644
index 0000000..a03b10d
--- /dev/null
+++ b/vendor/github.com/go-sql-driver/mysql/utils_legacy.go
@@ -0,0 +1,18 @@
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
+//
+// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved.
+//
+// This Source Code Form is subject to the terms of the Mozilla Public
+// License, v. 2.0. If a copy of the MPL was not distributed with this file,
+// You can obtain one at http://mozilla.org/MPL/2.0/.
+
+// +build !go1.7
+
+package mysql
+
+import "crypto/tls"
+
+func cloneTLSConfig(c *tls.Config) *tls.Config {
+ clone := *c
+ return &clone
+}
diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go
index 2b30f84..8b84d1b 100644
--- a/vendor/github.com/golang/protobuf/proto/encode.go
+++ b/vendor/github.com/golang/protobuf/proto/encode.go
@@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int {
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
// use signed number to get arithmetic right shift.
- return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+ return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63)))
}
func sizeZigzag64(x uint64) int {
- return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+ return sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))
}
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go
index ac4ddbc..1c22550 100644
--- a/vendor/github.com/golang/protobuf/proto/lib.go
+++ b/vendor/github.com/golang/protobuf/proto/lib.go
@@ -73,7 +73,6 @@ for a protocol buffer variable v:
When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers.
- - Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method.
The simplest way to describe this is to see an example.
diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go
index 61f83c1..5e14513 100644
--- a/vendor/github.com/golang/protobuf/proto/text_parser.go
+++ b/vendor/github.com/golang/protobuf/proto/text_parser.go
@@ -865,7 +865,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
- fv.SetUint(uint64(x))
+ fv.SetUint(x)
return nil
}
case reflect.Uint64:
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
index 4942418..f706871 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
@@ -32,8 +32,6 @@
# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/
# at src/google/protobuf/descriptor.proto
regenerate:
- echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION
- protoc --go_out=. -I$(HOME)/src/protobuf/src $(HOME)/src/protobuf/src/google/protobuf/descriptor.proto && \
- sed 's,^package google_protobuf,package descriptor,' google/protobuf/descriptor.pb.go > \
- $(GOPATH)/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go && \
- rm -f google/protobuf/descriptor.pb.go
+ @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION
+ cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto .
+ protoc --go_out=../../../../.. -I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
index a1d8a76..c6a91bc 100644
--- a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-go.
+// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/protobuf/descriptor.proto
-// DO NOT EDIT!
/*
Package descriptor is a generated protocol buffer package.
@@ -12,6 +11,7 @@ It has these top-level messages:
FileDescriptorSet
FileDescriptorProto
DescriptorProto
+ ExtensionRangeOptions
FieldDescriptorProto
OneofDescriptorProto
EnumDescriptorProto
@@ -65,6 +65,10 @@ const (
FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8
FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9
+ // Tag-delimited aggregate.
+ // Group type is deprecated and not supported in proto3. However, Proto3
+ // implementations should still be able to parse the group wire format and
+ // treat group fields as unknown fields.
FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10
FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
// New in version 2.
@@ -134,7 +138,7 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
*x = FieldDescriptorProto_Type(value)
return nil
}
-func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4, 0} }
type FieldDescriptorProto_Label int32
@@ -173,7 +177,7 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
return nil
}
func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
- return fileDescriptor0, []int{3, 1}
+ return fileDescriptor0, []int{4, 1}
}
// Generated classes can be optimized for speed or code size.
@@ -213,7 +217,7 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
*x = FileOptions_OptimizeMode(value)
return nil
}
-func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} }
+func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{10, 0} }
type FieldOptions_CType int32
@@ -251,7 +255,7 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
*x = FieldOptions_CType(value)
return nil
}
-func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} }
+func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 0} }
type FieldOptions_JSType int32
@@ -291,7 +295,49 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
*x = FieldOptions_JSType(value)
return nil
}
-func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 1} }
+func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{12, 1} }
+
+// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+// or neither? An HTTP-based RPC implementation may choose the GET verb for safe
+// methods, and the PUT verb for idempotent methods instead of the default POST.
+type MethodOptions_IdempotencyLevel int32
+
+const (
+ MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0
+ MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1
+ MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2
+)
+
+var MethodOptions_IdempotencyLevel_name = map[int32]string{
+ 0: "IDEMPOTENCY_UNKNOWN",
+ 1: "NO_SIDE_EFFECTS",
+ 2: "IDEMPOTENT",
+}
+var MethodOptions_IdempotencyLevel_value = map[string]int32{
+ "IDEMPOTENCY_UNKNOWN": 0,
+ "NO_SIDE_EFFECTS": 1,
+ "IDEMPOTENT": 2,
+}
+
+func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel {
+ p := new(MethodOptions_IdempotencyLevel)
+ *p = x
+ return p
+}
+func (x MethodOptions_IdempotencyLevel) String() string {
+ return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x))
+}
+func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel")
+ if err != nil {
+ return err
+ }
+ *x = MethodOptions_IdempotencyLevel(value)
+ return nil
+}
+func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{17, 0}
+}
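
The new MethodOptions_IdempotencyLevel enum and its generated accessors can be exercised directly from this package. A small hedged sketch (proto2-style pointer fields, hence the Enum() helper defined above); nothing here is prescribed by the diff itself.

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/proto"
        descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
    )

    func main() {
        opts := &descriptor.MethodOptions{
            IdempotencyLevel: descriptor.MethodOptions_NO_SIDE_EFFECTS.Enum(),
        }

        // GetIdempotencyLevel falls back to IDEMPOTENCY_UNKNOWN when unset.
        fmt.Println(opts.GetIdempotencyLevel()) // NO_SIDE_EFFECTS
        fmt.Print(proto.MarshalTextString(opts))
    }
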
// The protocol compiler can output a FileDescriptorSet containing the .proto
// files it parses.
@@ -522,9 +568,10 @@ func (m *DescriptorProto) GetReservedName() []string {
}
type DescriptorProto_ExtensionRange struct {
- Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
- End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
- XXX_unrecognized []byte `json:"-"`
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+ Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
}
func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} }
@@ -548,6 +595,13 @@ func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
return 0
}
+func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
// Range of reserved tag numbers. Reserved tag numbers may not be used by
// fields or extension ranges in the same message. Reserved ranges may
// not overlap.
@@ -578,6 +632,33 @@ func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
return 0
}
+type ExtensionRangeOptions struct {
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} }
+func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) }
+func (*ExtensionRangeOptions) ProtoMessage() {}
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_ExtensionRangeOptions
+}
+
+func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
// Describes a field within a message.
type FieldDescriptorProto struct {
Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
@@ -616,7 +697,7 @@ type FieldDescriptorProto struct {
func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} }
func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*FieldDescriptorProto) ProtoMessage() {}
-func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *FieldDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@@ -698,7 +779,7 @@ type OneofDescriptorProto struct {
func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} }
func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*OneofDescriptorProto) ProtoMessage() {}
-func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *OneofDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@@ -725,7 +806,7 @@ type EnumDescriptorProto struct {
func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} }
func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*EnumDescriptorProto) ProtoMessage() {}
-func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *EnumDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@@ -759,7 +840,7 @@ type EnumValueDescriptorProto struct {
func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} }
func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*EnumValueDescriptorProto) ProtoMessage() {}
-func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *EnumValueDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@@ -793,7 +874,7 @@ type ServiceDescriptorProto struct {
func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} }
func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*ServiceDescriptorProto) ProtoMessage() {}
-func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *ServiceDescriptorProto) GetName() string {
if m != nil && m.Name != nil {
@@ -834,7 +915,7 @@ type MethodDescriptorProto struct {
func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} }
func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
func (*MethodDescriptorProto) ProtoMessage() {}
-func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
const Default_MethodDescriptorProto_ClientStreaming bool = false
const Default_MethodDescriptorProto_ServerStreaming bool = false
@@ -929,6 +1010,7 @@ type FileOptions struct {
CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+ PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
// Is this file deprecated?
// Depending on the target platform, this can emit Deprecated annotations
// for everything in the file, or it will be completely ignored; in the very
@@ -942,6 +1024,18 @@ type FileOptions struct {
ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
// Namespace for generated classes; defaults to the package.
CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+ // By default Swift generators will take the proto package and CamelCase it
+ // replacing '.' with underscore and use that to prefix the types/symbols
+ // defined. When this option is provided, they will use this value instead
+ // to prefix the types/symbols defined.
+ SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
+ // Sets the php class prefix which is prepended to all php generated classes
+ // from this .proto. Default is empty.
+ PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
+ // Use this option to change the namespace of php generated classes. Default
+ // is empty. When this option is empty, the package name will be used for
+ // determining the namespace.
+ PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
proto.XXX_InternalExtensions `json:"-"`
@@ -951,7 +1045,7 @@ type FileOptions struct {
func (m *FileOptions) Reset() { *m = FileOptions{} }
func (m *FileOptions) String() string { return proto.CompactTextString(m) }
func (*FileOptions) ProtoMessage() {}
-func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
var extRange_FileOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -967,6 +1061,7 @@ const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPE
const Default_FileOptions_CcGenericServices bool = false
const Default_FileOptions_JavaGenericServices bool = false
const Default_FileOptions_PyGenericServices bool = false
+const Default_FileOptions_PhpGenericServices bool = false
const Default_FileOptions_Deprecated bool = false
const Default_FileOptions_CcEnableArenas bool = false
@@ -1040,6 +1135,13 @@ func (m *FileOptions) GetPyGenericServices() bool {
return Default_FileOptions_PyGenericServices
}
+func (m *FileOptions) GetPhpGenericServices() bool {
+ if m != nil && m.PhpGenericServices != nil {
+ return *m.PhpGenericServices
+ }
+ return Default_FileOptions_PhpGenericServices
+}
+
func (m *FileOptions) GetDeprecated() bool {
if m != nil && m.Deprecated != nil {
return *m.Deprecated
@@ -1068,6 +1170,27 @@ func (m *FileOptions) GetCsharpNamespace() string {
return ""
}
+func (m *FileOptions) GetSwiftPrefix() string {
+ if m != nil && m.SwiftPrefix != nil {
+ return *m.SwiftPrefix
+ }
+ return ""
+}
+
+func (m *FileOptions) GetPhpClassPrefix() string {
+ if m != nil && m.PhpClassPrefix != nil {
+ return *m.PhpClassPrefix
+ }
+ return ""
+}
+
+func (m *FileOptions) GetPhpNamespace() string {
+ if m != nil && m.PhpNamespace != nil {
+ return *m.PhpNamespace
+ }
+ return ""
+}
+
func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
if m != nil {
return m.UninterpretedOption
@@ -1135,7 +1258,7 @@ type MessageOptions struct {
func (m *MessageOptions) Reset() { *m = MessageOptions{} }
func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
func (*MessageOptions) ProtoMessage() {}
-func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
var extRange_MessageOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1198,13 +1321,15 @@ type FieldOptions struct {
Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
// The jstype option determines the JavaScript type used for values of the
// field. The option is permitted only for 64 bit integral and fixed types
- // (int64, uint64, sint64, fixed64, sfixed64). By default these types are
- // represented as JavaScript strings. This avoids loss of precision that can
- // happen when a large value is converted to a floating point JavaScript
- // numbers. Specifying JS_NUMBER for the jstype causes the generated
- // JavaScript code to use the JavaScript "number" type instead of strings.
- // This option is an enum to permit additional types to be added,
- // e.g. goog.math.Integer.
+ // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+ // is represented as JavaScript string, which avoids loss of precision that
+ // can happen when a large value is converted to a floating point JavaScript number.
+ // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+ // use the JavaScript "number" type. The behavior of the default option
+ // JS_NORMAL is implementation dependent.
+ //
+ // This option is an enum to permit additional types to be added, e.g.
+ // goog.math.Integer.
Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
// Should this field be parsed lazily? Lazy applies only to message-type
// fields. It means that when the outer message is initially parsed, the
@@ -1251,7 +1376,7 @@ type FieldOptions struct {
func (m *FieldOptions) Reset() { *m = FieldOptions{} }
func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
func (*FieldOptions) ProtoMessage() {}
-func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
var extRange_FieldOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1326,7 +1451,7 @@ type OneofOptions struct {
func (m *OneofOptions) Reset() { *m = OneofOptions{} }
func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
func (*OneofOptions) ProtoMessage() {}
-func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
var extRange_OneofOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1361,7 +1486,7 @@ type EnumOptions struct {
func (m *EnumOptions) Reset() { *m = EnumOptions{} }
func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
func (*EnumOptions) ProtoMessage() {}
-func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
var extRange_EnumOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1409,7 +1534,7 @@ type EnumValueOptions struct {
func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} }
func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
func (*EnumValueOptions) ProtoMessage() {}
-func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
var extRange_EnumValueOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1450,7 +1575,7 @@ type ServiceOptions struct {
func (m *ServiceOptions) Reset() { *m = ServiceOptions{} }
func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
func (*ServiceOptions) ProtoMessage() {}
-func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
var extRange_ServiceOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1481,7 +1606,8 @@ type MethodOptions struct {
// Depending on the target platform, this can emit Deprecated annotations
// for the method, or it will be completely ignored; in the very least,
// this is a formalization for deprecating methods.
- Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
// The parser stores options it doesn't recognize here. See above.
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
proto.XXX_InternalExtensions `json:"-"`
@@ -1491,7 +1617,7 @@ type MethodOptions struct {
func (m *MethodOptions) Reset() { *m = MethodOptions{} }
func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
func (*MethodOptions) ProtoMessage() {}
-func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
var extRange_MethodOptions = []proto.ExtensionRange{
{1000, 536870911},
@@ -1502,6 +1628,7 @@ func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
}
const Default_MethodOptions_Deprecated bool = false
+const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
func (m *MethodOptions) GetDeprecated() bool {
if m != nil && m.Deprecated != nil {
@@ -1510,6 +1637,13 @@ func (m *MethodOptions) GetDeprecated() bool {
return Default_MethodOptions_Deprecated
}
+func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
+ if m != nil && m.IdempotencyLevel != nil {
+ return *m.IdempotencyLevel
+ }
+ return Default_MethodOptions_IdempotencyLevel
+}
+
func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
if m != nil {
return m.UninterpretedOption
@@ -1539,7 +1673,7 @@ type UninterpretedOption struct {
func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} }
func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
func (*UninterpretedOption) ProtoMessage() {}
-func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
if m != nil {
@@ -1605,7 +1739,7 @@ func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOptio
func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
func (*UninterpretedOption_NamePart) ProtoMessage() {}
func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{17, 0}
+ return fileDescriptor0, []int{18, 0}
}
func (m *UninterpretedOption_NamePart) GetNamePart() string {
@@ -1675,7 +1809,7 @@ type SourceCodeInfo struct {
func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
func (*SourceCodeInfo) ProtoMessage() {}
-func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
if m != nil {
@@ -1771,7 +1905,7 @@ type SourceCodeInfo_Location struct {
func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} }
func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
func (*SourceCodeInfo_Location) ProtoMessage() {}
-func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18, 0} }
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19, 0} }
func (m *SourceCodeInfo_Location) GetPath() []int32 {
if m != nil {
@@ -1821,7 +1955,7 @@ type GeneratedCodeInfo struct {
func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} }
func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
func (*GeneratedCodeInfo) ProtoMessage() {}
-func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} }
func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
if m != nil {
@@ -1850,7 +1984,7 @@ func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_
func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
- return fileDescriptor0, []int{19, 0}
+ return fileDescriptor0, []int{20, 0}
}
func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
@@ -1887,6 +2021,7 @@ func init() {
proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
+ proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions")
proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
@@ -1912,154 +2047,169 @@ func init() {
proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
+ proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
}
func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
- // 2295 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0x4f, 0x6f, 0x1b, 0xc7,
- 0x15, 0xcf, 0xf2, 0x9f, 0xc8, 0x47, 0x8a, 0x1a, 0x8d, 0x14, 0x67, 0xad, 0xfc, 0xb1, 0xcc, 0xd8,
- 0xb1, 0x6c, 0xb7, 0x74, 0x20, 0xff, 0x89, 0xa3, 0x14, 0x29, 0x28, 0x71, 0xad, 0xd0, 0x90, 0x44,
- 0x76, 0x29, 0xb5, 0x4e, 0x2e, 0x8b, 0xd1, 0xee, 0x90, 0x5a, 0x7b, 0x39, 0xbb, 0xdd, 0x5d, 0xda,
- 0x56, 0x4e, 0x06, 0x7a, 0xea, 0xa5, 0xe7, 0xa2, 0x2d, 0x7a, 0xc8, 0x25, 0x40, 0x3f, 0x40, 0x0f,
- 0xfd, 0x0a, 0x05, 0x0a, 0xf4, 0x2b, 0x14, 0x05, 0xda, 0x6f, 0xd0, 0x6b, 0x31, 0x33, 0xbb, 0xcb,
- 0x5d, 0xfe, 0x89, 0xd5, 0x00, 0x49, 0x7a, 0x12, 0xe7, 0xf7, 0x7e, 0xef, 0xcd, 0x9b, 0x37, 0x6f,
- 0xde, 0xbc, 0x1d, 0xc1, 0xe6, 0xd0, 0x75, 0x87, 0x0e, 0xbd, 0xe3, 0xf9, 0x6e, 0xe8, 0x9e, 0x8e,
- 0x07, 0x77, 0x2c, 0x1a, 0x98, 0xbe, 0xed, 0x85, 0xae, 0xdf, 0x14, 0x18, 0x5e, 0x91, 0x8c, 0x66,
- 0xcc, 0x68, 0x1c, 0xc2, 0xea, 0x23, 0xdb, 0xa1, 0xed, 0x84, 0xd8, 0xa7, 0x21, 0x7e, 0x08, 0x85,
- 0x81, 0xed, 0x50, 0x55, 0xd9, 0xcc, 0x6f, 0x55, 0xb7, 0xaf, 0x35, 0xa7, 0x94, 0x9a, 0x59, 0x8d,
- 0x1e, 0x87, 0x75, 0xa1, 0xd1, 0xf8, 0x67, 0x01, 0xd6, 0xe6, 0x48, 0x31, 0x86, 0x02, 0x23, 0x23,
- 0x6e, 0x51, 0xd9, 0xaa, 0xe8, 0xe2, 0x37, 0x56, 0x61, 0xc9, 0x23, 0xe6, 0x33, 0x32, 0xa4, 0x6a,
- 0x4e, 0xc0, 0xf1, 0x10, 0xbf, 0x07, 0x60, 0x51, 0x8f, 0x32, 0x8b, 0x32, 0xf3, 0x5c, 0xcd, 0x6f,
- 0xe6, 0xb7, 0x2a, 0x7a, 0x0a, 0xc1, 0xb7, 0x61, 0xd5, 0x1b, 0x9f, 0x3a, 0xb6, 0x69, 0xa4, 0x68,
- 0xb0, 0x99, 0xdf, 0x2a, 0xea, 0x48, 0x0a, 0xda, 0x13, 0xf2, 0x0d, 0x58, 0x79, 0x41, 0xc9, 0xb3,
- 0x34, 0xb5, 0x2a, 0xa8, 0x75, 0x0e, 0xa7, 0x88, 0x7b, 0x50, 0x1b, 0xd1, 0x20, 0x20, 0x43, 0x6a,
- 0x84, 0xe7, 0x1e, 0x55, 0x0b, 0x62, 0xf5, 0x9b, 0x33, 0xab, 0x9f, 0x5e, 0x79, 0x35, 0xd2, 0x3a,
- 0x3e, 0xf7, 0x28, 0x6e, 0x41, 0x85, 0xb2, 0xf1, 0x48, 0x5a, 0x28, 0x2e, 0x88, 0x9f, 0xc6, 0xc6,
- 0xa3, 0x69, 0x2b, 0x65, 0xae, 0x16, 0x99, 0x58, 0x0a, 0xa8, 0xff, 0xdc, 0x36, 0xa9, 0x5a, 0x12,
- 0x06, 0x6e, 0xcc, 0x18, 0xe8, 0x4b, 0xf9, 0xb4, 0x8d, 0x58, 0x0f, 0xef, 0x41, 0x85, 0xbe, 0x0c,
- 0x29, 0x0b, 0x6c, 0x97, 0xa9, 0x4b, 0xc2, 0xc8, 0xf5, 0x39, 0xbb, 0x48, 0x1d, 0x6b, 0xda, 0xc4,
- 0x44, 0x0f, 0x3f, 0x80, 0x25, 0xd7, 0x0b, 0x6d, 0x97, 0x05, 0x6a, 0x79, 0x53, 0xd9, 0xaa, 0x6e,
- 0xbf, 0x33, 0x37, 0x11, 0xba, 0x92, 0xa3, 0xc7, 0x64, 0xdc, 0x01, 0x14, 0xb8, 0x63, 0xdf, 0xa4,
- 0x86, 0xe9, 0x5a, 0xd4, 0xb0, 0xd9, 0xc0, 0x55, 0x2b, 0xc2, 0xc0, 0x95, 0xd9, 0x85, 0x08, 0xe2,
- 0x9e, 0x6b, 0xd1, 0x0e, 0x1b, 0xb8, 0x7a, 0x3d, 0xc8, 0x8c, 0xf1, 0x25, 0x28, 0x05, 0xe7, 0x2c,
- 0x24, 0x2f, 0xd5, 0x9a, 0xc8, 0x90, 0x68, 0xd4, 0xf8, 0x4f, 0x11, 0x56, 0x2e, 0x92, 0x62, 0x9f,
- 0x40, 0x71, 0xc0, 0x57, 0xa9, 0xe6, 0xfe, 0x97, 0x18, 0x48, 0x9d, 0x6c, 0x10, 0x4b, 0xdf, 0x32,
- 0x88, 0x2d, 0xa8, 0x32, 0x1a, 0x84, 0xd4, 0x92, 0x19, 0x91, 0xbf, 0x60, 0x4e, 0x81, 0x54, 0x9a,
- 0x4d, 0xa9, 0xc2, 0xb7, 0x4a, 0xa9, 0x27, 0xb0, 0x92, 0xb8, 0x64, 0xf8, 0x84, 0x0d, 0xe3, 0xdc,
- 0xbc, 0xf3, 0x3a, 0x4f, 0x9a, 0x5a, 0xac, 0xa7, 0x73, 0x35, 0xbd, 0x4e, 0x33, 0x63, 0xdc, 0x06,
- 0x70, 0x19, 0x75, 0x07, 0x86, 0x45, 0x4d, 0x47, 0x2d, 0x2f, 0x88, 0x52, 0x97, 0x53, 0x66, 0xa2,
- 0xe4, 0x4a, 0xd4, 0x74, 0xf0, 0xc7, 0x93, 0x54, 0x5b, 0x5a, 0x90, 0x29, 0x87, 0xf2, 0x90, 0xcd,
- 0x64, 0xdb, 0x09, 0xd4, 0x7d, 0xca, 0xf3, 0x9e, 0x5a, 0xd1, 0xca, 0x2a, 0xc2, 0x89, 0xe6, 0x6b,
- 0x57, 0xa6, 0x47, 0x6a, 0x72, 0x61, 0xcb, 0x7e, 0x7a, 0x88, 0xdf, 0x87, 0x04, 0x30, 0x44, 0x5a,
- 0x81, 0xa8, 0x42, 0xb5, 0x18, 0x3c, 0x22, 0x23, 0xba, 0xf1, 0x10, 0xea, 0xd9, 0xf0, 0xe0, 0x75,
- 0x28, 0x06, 0x21, 0xf1, 0x43, 0x91, 0x85, 0x45, 0x5d, 0x0e, 0x30, 0x82, 0x3c, 0x65, 0x96, 0xa8,
- 0x72, 0x45, 0x9d, 0xff, 0xdc, 0xf8, 0x08, 0x96, 0x33, 0xd3, 0x5f, 0x54, 0xb1, 0xf1, 0xdb, 0x12,
- 0xac, 0xcf, 0xcb, 0xb9, 0xb9, 0xe9, 0x7f, 0x09, 0x4a, 0x6c, 0x3c, 0x3a, 0xa5, 0xbe, 0x9a, 0x17,
- 0x16, 0xa2, 0x11, 0x6e, 0x41, 0xd1, 0x21, 0xa7, 0xd4, 0x51, 0x0b, 0x9b, 0xca, 0x56, 0x7d, 0xfb,
- 0xf6, 0x85, 0xb2, 0xba, 0x79, 0xc0, 0x55, 0x74, 0xa9, 0x89, 0x3f, 0x85, 0x42, 0x54, 0xe2, 0xb8,
- 0x85, 0x5b, 0x17, 0xb3, 0xc0, 0x73, 0x51, 0x17, 0x7a, 0xf8, 0x6d, 0xa8, 0xf0, 0xbf, 0x32, 0xb6,
- 0x25, 0xe1, 0x73, 0x99, 0x03, 0x3c, 0xae, 0x78, 0x03, 0xca, 0x22, 0xcd, 0x2c, 0x1a, 0x5f, 0x0d,
- 0xc9, 0x98, 0x6f, 0x8c, 0x45, 0x07, 0x64, 0xec, 0x84, 0xc6, 0x73, 0xe2, 0x8c, 0xa9, 0x48, 0x98,
- 0x8a, 0x5e, 0x8b, 0xc0, 0x9f, 0x73, 0x0c, 0x5f, 0x81, 0xaa, 0xcc, 0x4a, 0x9b, 0x59, 0xf4, 0xa5,
- 0xa8, 0x3e, 0x45, 0x5d, 0x26, 0x6a, 0x87, 0x23, 0x7c, 0xfa, 0xa7, 0x81, 0xcb, 0xe2, 0xad, 0x15,
- 0x53, 0x70, 0x40, 0x4c, 0xff, 0xd1, 0x74, 0xe1, 0x7b, 0x77, 0xfe, 0xf2, 0xa6, 0x73, 0xb1, 0xf1,
- 0xe7, 0x1c, 0x14, 0xc4, 0x79, 0x5b, 0x81, 0xea, 0xf1, 0xe7, 0x3d, 0xcd, 0x68, 0x77, 0x4f, 0x76,
- 0x0f, 0x34, 0xa4, 0xe0, 0x3a, 0x80, 0x00, 0x1e, 0x1d, 0x74, 0x5b, 0xc7, 0x28, 0x97, 0x8c, 0x3b,
- 0x47, 0xc7, 0x0f, 0xee, 0xa1, 0x7c, 0xa2, 0x70, 0x22, 0x81, 0x42, 0x9a, 0x70, 0x77, 0x1b, 0x15,
- 0x31, 0x82, 0x9a, 0x34, 0xd0, 0x79, 0xa2, 0xb5, 0x1f, 0xdc, 0x43, 0xa5, 0x2c, 0x72, 0x77, 0x1b,
- 0x2d, 0xe1, 0x65, 0xa8, 0x08, 0x64, 0xb7, 0xdb, 0x3d, 0x40, 0xe5, 0xc4, 0x66, 0xff, 0x58, 0xef,
- 0x1c, 0xed, 0xa3, 0x4a, 0x62, 0x73, 0x5f, 0xef, 0x9e, 0xf4, 0x10, 0x24, 0x16, 0x0e, 0xb5, 0x7e,
- 0xbf, 0xb5, 0xaf, 0xa1, 0x6a, 0xc2, 0xd8, 0xfd, 0xfc, 0x58, 0xeb, 0xa3, 0x5a, 0xc6, 0xad, 0xbb,
- 0xdb, 0x68, 0x39, 0x99, 0x42, 0x3b, 0x3a, 0x39, 0x44, 0x75, 0xbc, 0x0a, 0xcb, 0x72, 0x8a, 0xd8,
- 0x89, 0x95, 0x29, 0xe8, 0xc1, 0x3d, 0x84, 0x26, 0x8e, 0x48, 0x2b, 0xab, 0x19, 0xe0, 0xc1, 0x3d,
- 0x84, 0x1b, 0x7b, 0x50, 0x14, 0xd9, 0x85, 0x31, 0xd4, 0x0f, 0x5a, 0xbb, 0xda, 0x81, 0xd1, 0xed,
- 0x1d, 0x77, 0xba, 0x47, 0xad, 0x03, 0xa4, 0x4c, 0x30, 0x5d, 0xfb, 0xd9, 0x49, 0x47, 0xd7, 0xda,
- 0x28, 0x97, 0xc6, 0x7a, 0x5a, 0xeb, 0x58, 0x6b, 0xa3, 0x7c, 0xc3, 0x84, 0xf5, 0x79, 0x75, 0x66,
- 0xee, 0xc9, 0x48, 0x6d, 0x71, 0x6e, 0xc1, 0x16, 0x0b, 0x5b, 0x33, 0x5b, 0xfc, 0x95, 0x02, 0x6b,
- 0x73, 0x6a, 0xed, 0xdc, 0x49, 0x7e, 0x0a, 0x45, 0x99, 0xa2, 0xf2, 0xf6, 0xb9, 0x39, 0xb7, 0x68,
- 0x8b, 0x84, 0x9d, 0xb9, 0x81, 0x84, 0x5e, 0xfa, 0x06, 0xce, 0x2f, 0xb8, 0x81, 0xb9, 0x89, 0x19,
- 0x27, 0x7f, 0xa5, 0x80, 0xba, 0xc8, 0xf6, 0x6b, 0x0a, 0x45, 0x2e, 0x53, 0x28, 0x3e, 0x99, 0x76,
- 0xe0, 0xea, 0xe2, 0x35, 0xcc, 0x78, 0xf1, 0xb5, 0x02, 0x97, 0xe6, 0x37, 0x2a, 0x73, 0x7d, 0xf8,
- 0x14, 0x4a, 0x23, 0x1a, 0x9e, 0xb9, 0xf1, 0x65, 0xfd, 0xc1, 0x9c, 0x2b, 0x80, 0x8b, 0xa7, 0x63,
- 0x15, 0x69, 0xa5, 0xef, 0x90, 0xfc, 0xa2, 0x6e, 0x43, 0x7a, 0x33, 0xe3, 0xe9, 0xaf, 0x73, 0xf0,
- 0xe6, 0x5c, 0xe3, 0x73, 0x1d, 0x7d, 0x17, 0xc0, 0x66, 0xde, 0x38, 0x94, 0x17, 0xb2, 0xac, 0x4f,
- 0x15, 0x81, 0x88, 0xb3, 0xcf, 0x6b, 0xcf, 0x38, 0x4c, 0xe4, 0x79, 0x21, 0x07, 0x09, 0x09, 0xc2,
- 0xc3, 0x89, 0xa3, 0x05, 0xe1, 0xe8, 0x7b, 0x0b, 0x56, 0x3a, 0x73, 0xd7, 0x7d, 0x08, 0xc8, 0x74,
- 0x6c, 0xca, 0x42, 0x23, 0x08, 0x7d, 0x4a, 0x46, 0x36, 0x1b, 0x8a, 0x02, 0x5c, 0xde, 0x29, 0x0e,
- 0x88, 0x13, 0x50, 0x7d, 0x45, 0x8a, 0xfb, 0xb1, 0x94, 0x6b, 0x88, 0x5b, 0xc6, 0x4f, 0x69, 0x94,
- 0x32, 0x1a, 0x52, 0x9c, 0x68, 0x34, 0x7e, 0xb3, 0x04, 0xd5, 0x54, 0x5b, 0x87, 0xaf, 0x42, 0xed,
- 0x29, 0x79, 0x4e, 0x8c, 0xb8, 0x55, 0x97, 0x91, 0xa8, 0x72, 0xac, 0x17, 0xb5, 0xeb, 0x1f, 0xc2,
- 0xba, 0xa0, 0xb8, 0xe3, 0x90, 0xfa, 0x86, 0xe9, 0x90, 0x20, 0x10, 0x41, 0x2b, 0x0b, 0x2a, 0xe6,
- 0xb2, 0x2e, 0x17, 0xed, 0xc5, 0x12, 0x7c, 0x1f, 0xd6, 0x84, 0xc6, 0x68, 0xec, 0x84, 0xb6, 0xe7,
- 0x50, 0x83, 0x7f, 0x3c, 0x04, 0xa2, 0x10, 0x27, 0x9e, 0xad, 0x72, 0xc6, 0x61, 0x44, 0xe0, 0x1e,
- 0x05, 0xb8, 0x0d, 0xef, 0x0a, 0xb5, 0x21, 0x65, 0xd4, 0x27, 0x21, 0x35, 0xe8, 0x2f, 0xc7, 0xc4,
- 0x09, 0x0c, 0xc2, 0x2c, 0xe3, 0x8c, 0x04, 0x67, 0xea, 0x3a, 0x37, 0xb0, 0x9b, 0x53, 0x15, 0xfd,
- 0x32, 0x27, 0xee, 0x47, 0x3c, 0x4d, 0xd0, 0x5a, 0xcc, 0xfa, 0x8c, 0x04, 0x67, 0x78, 0x07, 0x2e,
- 0x09, 0x2b, 0x41, 0xe8, 0xdb, 0x6c, 0x68, 0x98, 0x67, 0xd4, 0x7c, 0x66, 0x8c, 0xc3, 0xc1, 0x43,
- 0xf5, 0xed, 0xf4, 0xfc, 0xc2, 0xc3, 0xbe, 0xe0, 0xec, 0x71, 0xca, 0x49, 0x38, 0x78, 0x88, 0xfb,
- 0x50, 0xe3, 0x9b, 0x31, 0xb2, 0xbf, 0xa4, 0xc6, 0xc0, 0xf5, 0xc5, 0xcd, 0x52, 0x9f, 0x73, 0xb2,
- 0x53, 0x11, 0x6c, 0x76, 0x23, 0x85, 0x43, 0xd7, 0xa2, 0x3b, 0xc5, 0x7e, 0x4f, 0xd3, 0xda, 0x7a,
- 0x35, 0xb6, 0xf2, 0xc8, 0xf5, 0x79, 0x42, 0x0d, 0xdd, 0x24, 0xc0, 0x55, 0x99, 0x50, 0x43, 0x37,
- 0x0e, 0xef, 0x7d, 0x58, 0x33, 0x4d, 0xb9, 0x66, 0xdb, 0x34, 0xa2, 0x16, 0x3f, 0x50, 0x51, 0x26,
- 0x58, 0xa6, 0xb9, 0x2f, 0x09, 0x51, 0x8e, 0x07, 0xf8, 0x63, 0x78, 0x73, 0x12, 0xac, 0xb4, 0xe2,
- 0xea, 0xcc, 0x2a, 0xa7, 0x55, 0xef, 0xc3, 0x9a, 0x77, 0x3e, 0xab, 0x88, 0x33, 0x33, 0x7a, 0xe7,
- 0xd3, 0x6a, 0xd7, 0xc5, 0x67, 0x9b, 0x4f, 0x4d, 0x12, 0x52, 0x4b, 0x7d, 0x2b, 0xcd, 0x4e, 0x09,
- 0xf0, 0x1d, 0x40, 0xa6, 0x69, 0x50, 0x46, 0x4e, 0x1d, 0x6a, 0x10, 0x9f, 0x32, 0x12, 0xa8, 0x57,
- 0xd2, 0xe4, 0xba, 0x69, 0x6a, 0x42, 0xda, 0x12, 0x42, 0x7c, 0x0b, 0x56, 0xdd, 0xd3, 0xa7, 0xa6,
- 0xcc, 0x2c, 0xc3, 0xf3, 0xe9, 0xc0, 0x7e, 0xa9, 0x5e, 0x13, 0x61, 0x5a, 0xe1, 0x02, 0x91, 0x57,
- 0x3d, 0x01, 0xe3, 0x9b, 0x80, 0xcc, 0xe0, 0x8c, 0xf8, 0x9e, 0xb8, 0xda, 0x03, 0x8f, 0x98, 0x54,
- 0xbd, 0x2e, 0xa9, 0x12, 0x3f, 0x8a, 0x61, 0xfc, 0x04, 0xd6, 0xc7, 0xcc, 0x66, 0x21, 0xf5, 0x3d,
- 0x9f, 0xf2, 0x0e, 0x5d, 0x1e, 0x33, 0xf5, 0x5f, 0x4b, 0x0b, 0x7a, 0xec, 0x93, 0x34, 0x5b, 0xee,
- 0xae, 0xbe, 0x36, 0x9e, 0x05, 0x1b, 0x3b, 0x50, 0x4b, 0x6f, 0x3a, 0xae, 0x80, 0xdc, 0x76, 0xa4,
- 0xf0, 0x0b, 0x74, 0xaf, 0xdb, 0xe6, 0x57, 0xdf, 0x17, 0x1a, 0xca, 0xf1, 0x2b, 0xf8, 0xa0, 0x73,
- 0xac, 0x19, 0xfa, 0xc9, 0xd1, 0x71, 0xe7, 0x50, 0x43, 0xf9, 0x5b, 0x95, 0xf2, 0xbf, 0x97, 0xd0,
- 0xab, 0x57, 0xaf, 0x5e, 0xe5, 0x1e, 0x17, 0xca, 0x1f, 0xa0, 0x1b, 0x8d, 0xbf, 0xe6, 0xa0, 0x9e,
- 0x6d, 0x7e, 0xf1, 0x4f, 0xe0, 0xad, 0xf8, 0x4b, 0x35, 0xa0, 0xa1, 0xf1, 0xc2, 0xf6, 0x45, 0x36,
- 0x8e, 0x88, 0x6c, 0x1f, 0x93, 0x40, 0xae, 0x47, 0xac, 0x3e, 0x0d, 0x7f, 0x61, 0xfb, 0x3c, 0xd7,
- 0x46, 0x24, 0xc4, 0x07, 0x70, 0x85, 0xb9, 0x46, 0x10, 0x12, 0x66, 0x11, 0xdf, 0x32, 0x26, 0x6f,
- 0x04, 0x06, 0x31, 0x4d, 0x1a, 0x04, 0xae, 0xbc, 0x05, 0x12, 0x2b, 0xef, 0x30, 0xb7, 0x1f, 0x91,
- 0x27, 0xe5, 0xb1, 0x15, 0x51, 0xa7, 0x36, 0x3d, 0xbf, 0x68, 0xd3, 0xdf, 0x86, 0xca, 0x88, 0x78,
- 0x06, 0x65, 0xa1, 0x7f, 0x2e, 0x5a, 0xb6, 0xb2, 0x5e, 0x1e, 0x11, 0x4f, 0xe3, 0xe3, 0xef, 0x6e,
- 0x27, 0xb2, 0xd1, 0x2c, 0xa3, 0x4a, 0xe3, 0x1f, 0x79, 0xa8, 0xa5, 0x9b, 0x37, 0xde, 0x0b, 0x9b,
- 0xa2, 0x50, 0x2b, 0xe2, 0x28, 0xbf, 0xff, 0x8d, 0xad, 0x5e, 0x73, 0x8f, 0x57, 0xf0, 0x9d, 0x92,
- 0x6c, 0xa9, 0x74, 0xa9, 0xc9, 0x6f, 0x4f, 0x7e, 0x78, 0xa9, 0x6c, 0xd4, 0xcb, 0x7a, 0x34, 0xc2,
- 0xfb, 0x50, 0x7a, 0x1a, 0x08, 0xdb, 0x25, 0x61, 0xfb, 0xda, 0x37, 0xdb, 0x7e, 0xdc, 0x17, 0xc6,
- 0x2b, 0x8f, 0xfb, 0xc6, 0x51, 0x57, 0x3f, 0x6c, 0x1d, 0xe8, 0x91, 0x3a, 0xbe, 0x0c, 0x05, 0x87,
- 0x7c, 0x79, 0x9e, 0xad, 0xf5, 0x02, 0xba, 0x68, 0xf8, 0x2f, 0x43, 0xe1, 0x05, 0x25, 0xcf, 0xb2,
- 0x15, 0x56, 0x40, 0xdf, 0xe1, 0x31, 0xb8, 0x03, 0x45, 0x11, 0x2f, 0x0c, 0x10, 0x45, 0x0c, 0xbd,
- 0x81, 0xcb, 0x50, 0xd8, 0xeb, 0xea, 0xfc, 0x28, 0x20, 0xa8, 0x49, 0xd4, 0xe8, 0x75, 0xb4, 0x3d,
- 0x0d, 0xe5, 0x1a, 0xf7, 0xa1, 0x24, 0x83, 0xc0, 0x8f, 0x49, 0x12, 0x06, 0xf4, 0x46, 0x34, 0x8c,
- 0x6c, 0x28, 0xb1, 0xf4, 0xe4, 0x70, 0x57, 0xd3, 0x51, 0x2e, 0xbb, 0xc9, 0x05, 0x54, 0x6c, 0x04,
- 0x50, 0x4b, 0x77, 0x6f, 0xdf, 0x4b, 0x7e, 0x35, 0xfe, 0xa2, 0x40, 0x35, 0xd5, 0x8d, 0xf1, 0x3e,
- 0x80, 0x38, 0x8e, 0xfb, 0xc2, 0x20, 0x8e, 0x4d, 0x82, 0x28, 0x35, 0x40, 0x40, 0x2d, 0x8e, 0x5c,
- 0x74, 0xeb, 0xbe, 0x17, 0xe7, 0xff, 0xa8, 0x00, 0x9a, 0xee, 0xe4, 0xa6, 0x1c, 0x54, 0x7e, 0x50,
- 0x07, 0xff, 0xa0, 0x40, 0x3d, 0xdb, 0xbe, 0x4d, 0xb9, 0x77, 0xf5, 0x07, 0x75, 0xef, 0xf7, 0x0a,
- 0x2c, 0x67, 0x9a, 0xb6, 0xff, 0x2b, 0xef, 0x7e, 0x97, 0x87, 0xb5, 0x39, 0x7a, 0xb8, 0x15, 0x75,
- 0xb7, 0xb2, 0xe1, 0xfe, 0xf1, 0x45, 0xe6, 0x6a, 0xf2, 0xfb, 0xb3, 0x47, 0xfc, 0x30, 0x6a, 0x86,
- 0x6f, 0x02, 0xb2, 0x2d, 0xca, 0x42, 0x7b, 0x60, 0x53, 0x3f, 0xfa, 0x22, 0x97, 0x2d, 0xef, 0xca,
- 0x04, 0x97, 0x1f, 0xe5, 0x3f, 0x02, 0xec, 0xb9, 0x81, 0x1d, 0xda, 0xcf, 0xa9, 0x61, 0xb3, 0xf8,
- 0xf3, 0x9d, 0xb7, 0xc0, 0x05, 0x1d, 0xc5, 0x92, 0x0e, 0x0b, 0x13, 0x36, 0xa3, 0x43, 0x32, 0xc5,
- 0xe6, 0x15, 0x30, 0xaf, 0xa3, 0x58, 0x92, 0xb0, 0xaf, 0x42, 0xcd, 0x72, 0xc7, 0xbc, 0xa1, 0x90,
- 0x3c, 0x5e, 0x70, 0x15, 0xbd, 0x2a, 0xb1, 0x84, 0x12, 0x75, 0x7c, 0x93, 0x77, 0x83, 0x9a, 0x5e,
- 0x95, 0x98, 0xa4, 0xdc, 0x80, 0x15, 0x32, 0x1c, 0xfa, 0xdc, 0x78, 0x6c, 0x48, 0xf6, 0xb0, 0xf5,
- 0x04, 0x16, 0xc4, 0x8d, 0xc7, 0x50, 0x8e, 0xe3, 0xc0, 0x6f, 0x36, 0x1e, 0x09, 0xc3, 0x93, 0xaf,
- 0x37, 0xb9, 0xad, 0x8a, 0x5e, 0x66, 0xb1, 0xf0, 0x2a, 0xd4, 0xec, 0xc0, 0x98, 0x3c, 0x23, 0xe6,
- 0x36, 0x73, 0x5b, 0x65, 0xbd, 0x6a, 0x07, 0xc9, 0xbb, 0x51, 0xe3, 0xeb, 0x1c, 0xd4, 0xb3, 0xcf,
- 0xa0, 0xb8, 0x0d, 0x65, 0xc7, 0x35, 0x89, 0x48, 0x04, 0xf9, 0x06, 0xbf, 0xf5, 0x9a, 0x97, 0xd3,
- 0xe6, 0x41, 0xc4, 0xd7, 0x13, 0xcd, 0x8d, 0xbf, 0x29, 0x50, 0x8e, 0x61, 0x7c, 0x09, 0x0a, 0x1e,
- 0x09, 0xcf, 0x84, 0xb9, 0xe2, 0x6e, 0x0e, 0x29, 0xba, 0x18, 0x73, 0x3c, 0xf0, 0x08, 0x13, 0x29,
- 0x10, 0xe1, 0x7c, 0xcc, 0xf7, 0xd5, 0xa1, 0xc4, 0x12, 0x0d, 0xb2, 0x3b, 0x1a, 0x51, 0x16, 0x06,
- 0xf1, 0xbe, 0x46, 0xf8, 0x5e, 0x04, 0xe3, 0xdb, 0xb0, 0x1a, 0xfa, 0xc4, 0x76, 0x32, 0xdc, 0x82,
- 0xe0, 0xa2, 0x58, 0x90, 0x90, 0x77, 0xe0, 0x72, 0x6c, 0xd7, 0xa2, 0x21, 0x31, 0xcf, 0xa8, 0x35,
- 0x51, 0x2a, 0x89, 0x37, 0xb6, 0xb7, 0x22, 0x42, 0x3b, 0x92, 0xc7, 0xba, 0x8d, 0xbf, 0x2b, 0xb0,
- 0x1a, 0xb7, 0xf4, 0x56, 0x12, 0xac, 0x43, 0x00, 0xc2, 0x98, 0x1b, 0xa6, 0xc3, 0x35, 0x9b, 0xca,
- 0x33, 0x7a, 0xcd, 0x56, 0xa2, 0xa4, 0xa7, 0x0c, 0x6c, 0x8c, 0x00, 0x26, 0x92, 0x85, 0x61, 0xbb,
- 0x02, 0xd5, 0xe8, 0x8d, 0x5b, 0xfc, 0xa3, 0x44, 0x7e, 0x04, 0x82, 0x84, 0x78, 0xef, 0x8f, 0xd7,
- 0xa1, 0x78, 0x4a, 0x87, 0x36, 0x8b, 0x5e, 0xde, 0xe4, 0x20, 0x7e, 0xcf, 0x2b, 0x24, 0xef, 0x79,
- 0xbb, 0x4f, 0x60, 0xcd, 0x74, 0x47, 0xd3, 0xee, 0xee, 0xa2, 0xa9, 0x0f, 0xd1, 0xe0, 0x33, 0xe5,
- 0x0b, 0x98, 0x74, 0x6a, 0x5f, 0xe5, 0xf2, 0xfb, 0xbd, 0xdd, 0x3f, 0xe5, 0x36, 0xf6, 0xa5, 0x5e,
- 0x2f, 0x5e, 0xa6, 0x4e, 0x07, 0x0e, 0x35, 0xb9, 0xeb, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x5f,
- 0x1c, 0x48, 0x4f, 0x0d, 0x1a, 0x00, 0x00,
+ // 2519 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x6e, 0x1b, 0xc7,
+ 0x15, 0x0e, 0x7f, 0x45, 0x1e, 0x52, 0xd4, 0x68, 0xa4, 0xd8, 0x6b, 0xe5, 0xc7, 0x32, 0xf3, 0x63,
+ 0xd9, 0x69, 0xa8, 0x40, 0xb1, 0x1d, 0x47, 0x29, 0xd2, 0x52, 0xe4, 0x5a, 0xa1, 0x4a, 0x91, 0xec,
+ 0x92, 0x6a, 0x7e, 0x6e, 0x16, 0xa3, 0xdd, 0x21, 0xb9, 0xf6, 0x72, 0x77, 0xb3, 0xbb, 0xb4, 0xad,
+ 0xa0, 0x17, 0x06, 0x7a, 0x55, 0xa0, 0x0f, 0x50, 0x14, 0x45, 0x2f, 0x72, 0x13, 0xa0, 0x0f, 0x50,
+ 0x20, 0x77, 0x7d, 0x82, 0x02, 0x79, 0x83, 0xa2, 0x28, 0xd0, 0x3e, 0x46, 0x31, 0x33, 0xbb, 0xcb,
+ 0x5d, 0xfe, 0xc4, 0x6a, 0x80, 0x38, 0x57, 0xe4, 0x7c, 0xe7, 0x3b, 0x67, 0xce, 0x9c, 0x39, 0x33,
+ 0x73, 0x66, 0x16, 0x76, 0x47, 0xb6, 0x3d, 0x32, 0xe9, 0xbe, 0xe3, 0xda, 0xbe, 0x7d, 0x3e, 0x1d,
+ 0xee, 0xeb, 0xd4, 0xd3, 0x5c, 0xc3, 0xf1, 0x6d, 0xb7, 0xc6, 0x31, 0xbc, 0x21, 0x18, 0xb5, 0x90,
+ 0x51, 0x3d, 0x85, 0xcd, 0x07, 0x86, 0x49, 0x9b, 0x11, 0xb1, 0x4f, 0x7d, 0x7c, 0x1f, 0xb2, 0x43,
+ 0xc3, 0xa4, 0x52, 0x6a, 0x37, 0xb3, 0x57, 0x3a, 0x78, 0xb3, 0x36, 0xa7, 0x54, 0x4b, 0x6a, 0xf4,
+ 0x18, 0xac, 0x70, 0x8d, 0xea, 0xbf, 0xb3, 0xb0, 0xb5, 0x44, 0x8a, 0x31, 0x64, 0x2d, 0x32, 0x61,
+ 0x16, 0x53, 0x7b, 0x45, 0x85, 0xff, 0xc7, 0x12, 0xac, 0x39, 0x44, 0x7b, 0x44, 0x46, 0x54, 0x4a,
+ 0x73, 0x38, 0x6c, 0xe2, 0xd7, 0x01, 0x74, 0xea, 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x21, 0x65, 0x76,
+ 0x33, 0x7b, 0x45, 0x25, 0x86, 0xe0, 0x77, 0x60, 0xd3, 0x99, 0x9e, 0x9b, 0x86, 0xa6, 0xc6, 0x68,
+ 0xb0, 0x9b, 0xd9, 0xcb, 0x29, 0x48, 0x08, 0x9a, 0x33, 0xf2, 0x4d, 0xd8, 0x78, 0x42, 0xc9, 0xa3,
+ 0x38, 0xb5, 0xc4, 0xa9, 0x15, 0x06, 0xc7, 0x88, 0x0d, 0x28, 0x4f, 0xa8, 0xe7, 0x91, 0x11, 0x55,
+ 0xfd, 0x0b, 0x87, 0x4a, 0x59, 0x3e, 0xfa, 0xdd, 0x85, 0xd1, 0xcf, 0x8f, 0xbc, 0x14, 0x68, 0x0d,
+ 0x2e, 0x1c, 0x8a, 0xeb, 0x50, 0xa4, 0xd6, 0x74, 0x22, 0x2c, 0xe4, 0x56, 0xc4, 0x4f, 0xb6, 0xa6,
+ 0x93, 0x79, 0x2b, 0x05, 0xa6, 0x16, 0x98, 0x58, 0xf3, 0xa8, 0xfb, 0xd8, 0xd0, 0xa8, 0x94, 0xe7,
+ 0x06, 0x6e, 0x2e, 0x18, 0xe8, 0x0b, 0xf9, 0xbc, 0x8d, 0x50, 0x0f, 0x37, 0xa0, 0x48, 0x9f, 0xfa,
+ 0xd4, 0xf2, 0x0c, 0xdb, 0x92, 0xd6, 0xb8, 0x91, 0xb7, 0x96, 0xcc, 0x22, 0x35, 0xf5, 0x79, 0x13,
+ 0x33, 0x3d, 0x7c, 0x0f, 0xd6, 0x6c, 0xc7, 0x37, 0x6c, 0xcb, 0x93, 0x0a, 0xbb, 0xa9, 0xbd, 0xd2,
+ 0xc1, 0xab, 0x4b, 0x13, 0xa1, 0x2b, 0x38, 0x4a, 0x48, 0xc6, 0x2d, 0x40, 0x9e, 0x3d, 0x75, 0x35,
+ 0xaa, 0x6a, 0xb6, 0x4e, 0x55, 0xc3, 0x1a, 0xda, 0x52, 0x91, 0x1b, 0xb8, 0xbe, 0x38, 0x10, 0x4e,
+ 0x6c, 0xd8, 0x3a, 0x6d, 0x59, 0x43, 0x5b, 0xa9, 0x78, 0x89, 0x36, 0xbe, 0x02, 0x79, 0xef, 0xc2,
+ 0xf2, 0xc9, 0x53, 0xa9, 0xcc, 0x33, 0x24, 0x68, 0x55, 0xbf, 0xcd, 0xc3, 0xc6, 0x65, 0x52, 0xec,
+ 0x23, 0xc8, 0x0d, 0xd9, 0x28, 0xa5, 0xf4, 0xff, 0x13, 0x03, 0xa1, 0x93, 0x0c, 0x62, 0xfe, 0x07,
+ 0x06, 0xb1, 0x0e, 0x25, 0x8b, 0x7a, 0x3e, 0xd5, 0x45, 0x46, 0x64, 0x2e, 0x99, 0x53, 0x20, 0x94,
+ 0x16, 0x53, 0x2a, 0xfb, 0x83, 0x52, 0xea, 0x33, 0xd8, 0x88, 0x5c, 0x52, 0x5d, 0x62, 0x8d, 0xc2,
+ 0xdc, 0xdc, 0x7f, 0x9e, 0x27, 0x35, 0x39, 0xd4, 0x53, 0x98, 0x9a, 0x52, 0xa1, 0x89, 0x36, 0x6e,
+ 0x02, 0xd8, 0x16, 0xb5, 0x87, 0xaa, 0x4e, 0x35, 0x53, 0x2a, 0xac, 0x88, 0x52, 0x97, 0x51, 0x16,
+ 0xa2, 0x64, 0x0b, 0x54, 0x33, 0xf1, 0x87, 0xb3, 0x54, 0x5b, 0x5b, 0x91, 0x29, 0xa7, 0x62, 0x91,
+ 0x2d, 0x64, 0xdb, 0x19, 0x54, 0x5c, 0xca, 0xf2, 0x9e, 0xea, 0xc1, 0xc8, 0x8a, 0xdc, 0x89, 0xda,
+ 0x73, 0x47, 0xa6, 0x04, 0x6a, 0x62, 0x60, 0xeb, 0x6e, 0xbc, 0x89, 0xdf, 0x80, 0x08, 0x50, 0x79,
+ 0x5a, 0x01, 0xdf, 0x85, 0xca, 0x21, 0xd8, 0x21, 0x13, 0xba, 0xf3, 0x15, 0x54, 0x92, 0xe1, 0xc1,
+ 0xdb, 0x90, 0xf3, 0x7c, 0xe2, 0xfa, 0x3c, 0x0b, 0x73, 0x8a, 0x68, 0x60, 0x04, 0x19, 0x6a, 0xe9,
+ 0x7c, 0x97, 0xcb, 0x29, 0xec, 0x2f, 0xfe, 0xe5, 0x6c, 0xc0, 0x19, 0x3e, 0xe0, 0xb7, 0x17, 0x67,
+ 0x34, 0x61, 0x79, 0x7e, 0xdc, 0x3b, 0x1f, 0xc0, 0x7a, 0x62, 0x00, 0x97, 0xed, 0xba, 0xfa, 0x5b,
+ 0x78, 0x79, 0xa9, 0x69, 0xfc, 0x19, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x65,
+ 0xac, 0xe8, 0x4a, 0xfa, 0xcf, 0xda, 0x8a, 0x9c, 0x3b, 0x8b, 0xb3, 0x85, 0x15, 0x65, 0x6b, 0xba,
+ 0x08, 0xde, 0x2e, 0x16, 0xfe, 0xbb, 0x86, 0x9e, 0x3d, 0x7b, 0xf6, 0x2c, 0x5d, 0xfd, 0x63, 0x1e,
+ 0xb6, 0x97, 0xad, 0x99, 0xa5, 0xcb, 0xf7, 0x0a, 0xe4, 0xad, 0xe9, 0xe4, 0x9c, 0xba, 0x3c, 0x48,
+ 0x39, 0x25, 0x68, 0xe1, 0x3a, 0xe4, 0x4c, 0x72, 0x4e, 0x4d, 0x29, 0xbb, 0x9b, 0xda, 0xab, 0x1c,
+ 0xbc, 0x73, 0xa9, 0x55, 0x59, 0x6b, 0x33, 0x15, 0x45, 0x68, 0xe2, 0x8f, 0x21, 0x1b, 0x6c, 0xd1,
+ 0xcc, 0xc2, 0xed, 0xcb, 0x59, 0x60, 0x6b, 0x49, 0xe1, 0x7a, 0xf8, 0x15, 0x28, 0xb2, 0x5f, 0x91,
+ 0x1b, 0x79, 0xee, 0x73, 0x81, 0x01, 0x2c, 0x2f, 0xf0, 0x0e, 0x14, 0xf8, 0x32, 0xd1, 0x69, 0x78,
+ 0xb4, 0x45, 0x6d, 0x96, 0x58, 0x3a, 0x1d, 0x92, 0xa9, 0xe9, 0xab, 0x8f, 0x89, 0x39, 0xa5, 0x3c,
+ 0xe1, 0x8b, 0x4a, 0x39, 0x00, 0x7f, 0xc3, 0x30, 0x7c, 0x1d, 0x4a, 0x62, 0x55, 0x19, 0x96, 0x4e,
+ 0x9f, 0xf2, 0xdd, 0x33, 0xa7, 0x88, 0x85, 0xd6, 0x62, 0x08, 0xeb, 0xfe, 0xa1, 0x67, 0x5b, 0x61,
+ 0x6a, 0xf2, 0x2e, 0x18, 0xc0, 0xbb, 0xff, 0x60, 0x7e, 0xe3, 0x7e, 0x6d, 0xf9, 0xf0, 0xe6, 0x73,
+ 0xaa, 0xfa, 0xb7, 0x34, 0x64, 0xf9, 0x7e, 0xb1, 0x01, 0xa5, 0xc1, 0xe7, 0x3d, 0x59, 0x6d, 0x76,
+ 0xcf, 0x8e, 0xda, 0x32, 0x4a, 0xe1, 0x0a, 0x00, 0x07, 0x1e, 0xb4, 0xbb, 0xf5, 0x01, 0x4a, 0x47,
+ 0xed, 0x56, 0x67, 0x70, 0xef, 0x0e, 0xca, 0x44, 0x0a, 0x67, 0x02, 0xc8, 0xc6, 0x09, 0xef, 0x1f,
+ 0xa0, 0x1c, 0x46, 0x50, 0x16, 0x06, 0x5a, 0x9f, 0xc9, 0xcd, 0x7b, 0x77, 0x50, 0x3e, 0x89, 0xbc,
+ 0x7f, 0x80, 0xd6, 0xf0, 0x3a, 0x14, 0x39, 0x72, 0xd4, 0xed, 0xb6, 0x51, 0x21, 0xb2, 0xd9, 0x1f,
+ 0x28, 0xad, 0xce, 0x31, 0x2a, 0x46, 0x36, 0x8f, 0x95, 0xee, 0x59, 0x0f, 0x41, 0x64, 0xe1, 0x54,
+ 0xee, 0xf7, 0xeb, 0xc7, 0x32, 0x2a, 0x45, 0x8c, 0xa3, 0xcf, 0x07, 0x72, 0x1f, 0x95, 0x13, 0x6e,
+ 0xbd, 0x7f, 0x80, 0xd6, 0xa3, 0x2e, 0xe4, 0xce, 0xd9, 0x29, 0xaa, 0xe0, 0x4d, 0x58, 0x17, 0x5d,
+ 0x84, 0x4e, 0x6c, 0xcc, 0x41, 0xf7, 0xee, 0x20, 0x34, 0x73, 0x44, 0x58, 0xd9, 0x4c, 0x00, 0xf7,
+ 0xee, 0x20, 0x5c, 0x6d, 0x40, 0x8e, 0x67, 0x17, 0xc6, 0x50, 0x69, 0xd7, 0x8f, 0xe4, 0xb6, 0xda,
+ 0xed, 0x0d, 0x5a, 0xdd, 0x4e, 0xbd, 0x8d, 0x52, 0x33, 0x4c, 0x91, 0x7f, 0x7d, 0xd6, 0x52, 0xe4,
+ 0x26, 0x4a, 0xc7, 0xb1, 0x9e, 0x5c, 0x1f, 0xc8, 0x4d, 0x94, 0xa9, 0x6a, 0xb0, 0xbd, 0x6c, 0x9f,
+ 0x5c, 0xba, 0x32, 0x62, 0x53, 0x9c, 0x5e, 0x31, 0xc5, 0xdc, 0xd6, 0xc2, 0x14, 0x7f, 0x9d, 0x82,
+ 0xad, 0x25, 0x67, 0xc5, 0xd2, 0x4e, 0x7e, 0x01, 0x39, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb5, 0xf4,
+ 0xd0, 0xe1, 0x09, 0xbb, 0x70, 0x82, 0x72, 0xbd, 0x78, 0x05, 0x91, 0x59, 0x51, 0x41, 0x30, 0x13,
+ 0x0b, 0x4e, 0xfe, 0x2e, 0x05, 0xd2, 0x2a, 0xdb, 0xcf, 0xd9, 0x28, 0xd2, 0x89, 0x8d, 0xe2, 0xa3,
+ 0x79, 0x07, 0x6e, 0xac, 0x1e, 0xc3, 0x82, 0x17, 0xdf, 0xa4, 0xe0, 0xca, 0xf2, 0x42, 0x6b, 0xa9,
+ 0x0f, 0x1f, 0x43, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x16, 0x1b, 0x6f, 0x2f, 0x39, 0xc2, 0x98, 0x78,
+ 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xcc, 0xac, 0xaa, 0x96, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x3e,
+ 0x0d, 0x2f, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xd7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xa2, 0xa0, 0x10,
+ 0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80,
+ 0x38, 0xe1, 0xfe, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xf5, 0x15, 0x23, 0x5d, 0x38, 0xab, 0xdf, 0x03,
+ 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1,
+ 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0xcf, 0x38, 0x37,
+ 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0xb6, 0x00, 0xa5, 0x58, 0x59, 0x8a, 0x6f,
+ 0x40, 0xf9, 0x21, 0x79, 0x4c, 0xd4, 0xf0, 0xaa, 0x21, 0x22, 0x51, 0x62, 0x58, 0x2f, 0xb8, 0x6e,
+ 0xbc, 0x07, 0xdb, 0x9c, 0x62, 0x4f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, 0x3c, 0x8f, 0x07, 0xad, 0xc0,
+ 0xa9, 0x98, 0xc9, 0xba, 0x4c, 0xd4, 0x08, 0x25, 0xf8, 0x2e, 0x6c, 0x71, 0x8d, 0xc9, 0xd4, 0xf4,
+ 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xfc, 0x78, 0x7c, 0x23, 0x8e, 0x3c, 0xdb, 0x64, 0x8c, 0xd3, 0x80,
+ 0xc0, 0x3c, 0xf2, 0x70, 0x13, 0x5e, 0xe3, 0x6a, 0x23, 0x6a, 0x51, 0x97, 0xf8, 0x54, 0xa5, 0x5f,
+ 0x4e, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x31, 0xf1, 0xc6, 0xd2, 0x36, 0x33, 0x70, 0x94, 0x96,
+ 0x52, 0xca, 0x35, 0x46, 0x3c, 0x0e, 0x78, 0x32, 0xa7, 0xd5, 0x2d, 0xfd, 0x13, 0xe2, 0x8d, 0xf1,
+ 0x21, 0x5c, 0xe1, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x91, 0xaa, 0x8d, 0xa9, 0xf6, 0x48, 0x9d, 0xfa,
+ 0xc3, 0xfb, 0xd2, 0x2b, 0xf1, 0xfe, 0xb9, 0x87, 0x7d, 0xce, 0x69, 0x30, 0xca, 0x99, 0x3f, 0xbc,
+ 0x8f, 0xfb, 0x50, 0x66, 0x93, 0x31, 0x31, 0xbe, 0xa2, 0xea, 0xd0, 0x76, 0xf9, 0xc9, 0x52, 0x59,
+ 0xb2, 0xb2, 0x63, 0x11, 0xac, 0x75, 0x03, 0x85, 0x53, 0x5b, 0xa7, 0x87, 0xb9, 0x7e, 0x4f, 0x96,
+ 0x9b, 0x4a, 0x29, 0xb4, 0xf2, 0xc0, 0x76, 0x59, 0x42, 0x8d, 0xec, 0x28, 0xc0, 0x25, 0x91, 0x50,
+ 0x23, 0x3b, 0x0c, 0xef, 0x5d, 0xd8, 0xd2, 0x34, 0x31, 0x66, 0x43, 0x53, 0x83, 0x2b, 0x8a, 0x27,
+ 0xa1, 0x44, 0xb0, 0x34, 0xed, 0x58, 0x10, 0x82, 0x1c, 0xf7, 0xf0, 0x87, 0xf0, 0xf2, 0x2c, 0x58,
+ 0x71, 0xc5, 0xcd, 0x85, 0x51, 0xce, 0xab, 0xde, 0x85, 0x2d, 0xe7, 0x62, 0x51, 0x11, 0x27, 0x7a,
+ 0x74, 0x2e, 0xe6, 0xd5, 0x3e, 0x80, 0x6d, 0x67, 0xec, 0x2c, 0xea, 0xdd, 0x8e, 0xeb, 0x61, 0x67,
+ 0xec, 0xcc, 0x2b, 0xbe, 0xc5, 0xef, 0xab, 0x2e, 0xd5, 0x88, 0x4f, 0x75, 0xe9, 0x6a, 0x9c, 0x1e,
+ 0x13, 0xe0, 0x7d, 0x40, 0x9a, 0xa6, 0x52, 0x8b, 0x9c, 0x9b, 0x54, 0x25, 0x2e, 0xb5, 0x88, 0x27,
+ 0x5d, 0x8f, 0x93, 0x2b, 0x9a, 0x26, 0x73, 0x69, 0x9d, 0x0b, 0xf1, 0x6d, 0xd8, 0xb4, 0xcf, 0x1f,
+ 0x6a, 0x22, 0x25, 0x55, 0xc7, 0xa5, 0x43, 0xe3, 0xa9, 0xf4, 0x26, 0x8f, 0xef, 0x06, 0x13, 0xf0,
+ 0x84, 0xec, 0x71, 0x18, 0xdf, 0x02, 0xa4, 0x79, 0x63, 0xe2, 0x3a, 0xbc, 0x26, 0xf0, 0x1c, 0xa2,
+ 0x51, 0xe9, 0x2d, 0x41, 0x15, 0x78, 0x27, 0x84, 0xd9, 0x92, 0xf0, 0x9e, 0x18, 0x43, 0x3f, 0xb4,
+ 0x78, 0x53, 0x2c, 0x09, 0x8e, 0x05, 0xd6, 0xf6, 0x00, 0xb1, 0x50, 0x24, 0x3a, 0xde, 0xe3, 0xb4,
+ 0x8a, 0x33, 0x76, 0xe2, 0xfd, 0xbe, 0x01, 0xeb, 0x8c, 0x39, 0xeb, 0xf4, 0x96, 0xa8, 0x67, 0x9c,
+ 0x71, 0xac, 0xc7, 0x1f, 0xad, 0xb4, 0xac, 0x1e, 0x42, 0x39, 0x9e, 0x9f, 0xb8, 0x08, 0x22, 0x43,
+ 0x51, 0x8a, 0x9d, 0xf5, 0x8d, 0x6e, 0x93, 0x9d, 0xd2, 0x5f, 0xc8, 0x28, 0xcd, 0xaa, 0x85, 0x76,
+ 0x6b, 0x20, 0xab, 0xca, 0x59, 0x67, 0xd0, 0x3a, 0x95, 0x51, 0x26, 0x56, 0x96, 0x9e, 0x64, 0x0b,
+ 0x6f, 0xa3, 0x9b, 0xd5, 0xef, 0xd2, 0x50, 0x49, 0xde, 0x33, 0xf0, 0xcf, 0xe1, 0x6a, 0xf8, 0x28,
+ 0xe0, 0x51, 0x5f, 0x7d, 0x62, 0xb8, 0x7c, 0xe1, 0x4c, 0x88, 0xa8, 0xb3, 0xa3, 0xa9, 0xdb, 0x0e,
+ 0x58, 0x7d, 0xea, 0x7f, 0x6a, 0xb8, 0x6c, 0x59, 0x4c, 0x88, 0x8f, 0xdb, 0x70, 0xdd, 0xb2, 0x55,
+ 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3d, 0xc7, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xe2,
+ 0xc0, 0x8a, 0xac, 0xbc, 0x6a, 0xd9, 0xfd, 0x80, 0x3c, 0xdb, 0xc9, 0xeb, 0x01, 0x75, 0x2e, 0xcd,
+ 0x32, 0xab, 0xd2, 0xec, 0x15, 0x28, 0x4e, 0x88, 0xa3, 0x52, 0xcb, 0x77, 0x2f, 0x78, 0x75, 0x59,
+ 0x50, 0x0a, 0x13, 0xe2, 0xc8, 0xac, 0xfd, 0x42, 0x8a, 0xfc, 0x93, 0x6c, 0xa1, 0x80, 0x8a, 0x27,
+ 0xd9, 0x42, 0x11, 0x41, 0xf5, 0x5f, 0x19, 0x28, 0xc7, 0xab, 0x4d, 0x56, 0xbc, 0x6b, 0xfc, 0x64,
+ 0x49, 0xf1, 0xbd, 0xe7, 0x8d, 0xef, 0xad, 0x4d, 0x6b, 0x0d, 0x76, 0xe4, 0x1c, 0xe6, 0x45, 0x0d,
+ 0xa8, 0x08, 0x4d, 0x76, 0xdc, 0xb3, 0xdd, 0x86, 0x8a, 0x7b, 0x4d, 0x41, 0x09, 0x5a, 0xf8, 0x18,
+ 0xf2, 0x0f, 0x3d, 0x6e, 0x3b, 0xcf, 0x6d, 0xbf, 0xf9, 0xfd, 0xb6, 0x4f, 0xfa, 0xdc, 0x78, 0xf1,
+ 0xa4, 0xaf, 0x76, 0xba, 0xca, 0x69, 0xbd, 0xad, 0x04, 0xea, 0xf8, 0x1a, 0x64, 0x4d, 0xf2, 0xd5,
+ 0x45, 0xf2, 0x70, 0xe2, 0xd0, 0x65, 0x27, 0xe1, 0x1a, 0x64, 0x9f, 0x50, 0xf2, 0x28, 0x79, 0x24,
+ 0x70, 0xe8, 0x47, 0x5c, 0x0c, 0xfb, 0x90, 0xe3, 0xf1, 0xc2, 0x00, 0x41, 0xc4, 0xd0, 0x4b, 0xb8,
+ 0x00, 0xd9, 0x46, 0x57, 0x61, 0x0b, 0x02, 0x41, 0x59, 0xa0, 0x6a, 0xaf, 0x25, 0x37, 0x64, 0x94,
+ 0xae, 0xde, 0x85, 0xbc, 0x08, 0x02, 0x5b, 0x2c, 0x51, 0x18, 0xd0, 0x4b, 0x41, 0x33, 0xb0, 0x91,
+ 0x0a, 0xa5, 0x67, 0xa7, 0x47, 0xb2, 0x82, 0xd2, 0xc9, 0xa9, 0xce, 0xa2, 0x5c, 0xd5, 0x83, 0x72,
+ 0xbc, 0xdc, 0x7c, 0x31, 0x57, 0xc9, 0xbf, 0xa7, 0xa0, 0x14, 0x2b, 0x1f, 0x59, 0xe1, 0x42, 0x4c,
+ 0xd3, 0x7e, 0xa2, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, 0x2e, 0x3b, 0x75,
+ 0x2f, 0x68, 0x89, 0xe4, 0x50, 0xbe, 0xfa, 0x97, 0x14, 0xa0, 0xf9, 0x02, 0x74, 0xce, 0xcd, 0xd4,
+ 0x4f, 0xe9, 0x66, 0xf5, 0xcf, 0x29, 0xa8, 0x24, 0xab, 0xce, 0x39, 0xf7, 0x6e, 0xfc, 0xa4, 0xee,
+ 0xfd, 0x33, 0x0d, 0xeb, 0x89, 0x5a, 0xf3, 0xb2, 0xde, 0x7d, 0x09, 0x9b, 0x86, 0x4e, 0x27, 0x8e,
+ 0xed, 0x53, 0x4b, 0xbb, 0x50, 0x4d, 0xfa, 0x98, 0x9a, 0x52, 0x95, 0x6f, 0x1a, 0xfb, 0xdf, 0x5f,
+ 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0xd3, 0x5e, 0x77, 0x20,
+ 0x77, 0x1a, 0x9f, 0xab, 0x67, 0x9d, 0x5f, 0x75, 0xba, 0x9f, 0x76, 0x14, 0x64, 0xcc, 0xd1, 0x7e,
+ 0xc4, 0x65, 0xdf, 0x03, 0x34, 0xef, 0x14, 0xbe, 0x0a, 0xcb, 0xdc, 0x42, 0x2f, 0xe1, 0x2d, 0xd8,
+ 0xe8, 0x74, 0xd5, 0x7e, 0xab, 0x29, 0xab, 0xf2, 0x83, 0x07, 0x72, 0x63, 0xd0, 0x17, 0xd7, 0xfb,
+ 0x88, 0x3d, 0x48, 0x2c, 0xf0, 0xea, 0x9f, 0x32, 0xb0, 0xb5, 0xc4, 0x13, 0x5c, 0x0f, 0x6e, 0x16,
+ 0xe2, 0xb2, 0xf3, 0xee, 0x65, 0xbc, 0xaf, 0xb1, 0x82, 0xa0, 0x47, 0x5c, 0x3f, 0xb8, 0x88, 0xdc,
+ 0x02, 0x16, 0x25, 0xcb, 0x37, 0x86, 0x06, 0x75, 0x83, 0xd7, 0x10, 0x71, 0xdd, 0xd8, 0x98, 0xe1,
+ 0xe2, 0x41, 0xe4, 0x67, 0x80, 0x1d, 0xdb, 0x33, 0x7c, 0xe3, 0x31, 0x55, 0x0d, 0x2b, 0x7c, 0x3a,
+ 0x61, 0xd7, 0x8f, 0xac, 0x82, 0x42, 0x49, 0xcb, 0xf2, 0x23, 0xb6, 0x45, 0x47, 0x64, 0x8e, 0xcd,
+ 0x36, 0xf3, 0x8c, 0x82, 0x42, 0x49, 0xc4, 0xbe, 0x01, 0x65, 0xdd, 0x9e, 0xb2, 0x9a, 0x4c, 0xf0,
+ 0xd8, 0xd9, 0x91, 0x52, 0x4a, 0x02, 0x8b, 0x28, 0x41, 0xb5, 0x3d, 0x7b, 0xb3, 0x29, 0x2b, 0x25,
+ 0x81, 0x09, 0xca, 0x4d, 0xd8, 0x20, 0xa3, 0x91, 0xcb, 0x8c, 0x87, 0x86, 0xc4, 0xfd, 0xa1, 0x12,
+ 0xc1, 0x9c, 0xb8, 0x73, 0x02, 0x85, 0x30, 0x0e, 0xec, 0xa8, 0x66, 0x91, 0x50, 0x1d, 0xf1, 0x6e,
+ 0x97, 0xde, 0x2b, 0x2a, 0x05, 0x2b, 0x14, 0xde, 0x80, 0xb2, 0xe1, 0xa9, 0xb3, 0x27, 0xe8, 0xf4,
+ 0x6e, 0x7a, 0xaf, 0xa0, 0x94, 0x0c, 0x2f, 0x7a, 0xbe, 0xab, 0x7e, 0x93, 0x86, 0x4a, 0xf2, 0x09,
+ 0x1d, 0x37, 0xa1, 0x60, 0xda, 0x1a, 0xe1, 0xa9, 0x25, 0xbe, 0xdf, 0xec, 0x3d, 0xe7, 0xd5, 0xbd,
+ 0xd6, 0x0e, 0xf8, 0x4a, 0xa4, 0xb9, 0xf3, 0x8f, 0x14, 0x14, 0x42, 0x18, 0x5f, 0x81, 0xac, 0x43,
+ 0xfc, 0x31, 0x37, 0x97, 0x3b, 0x4a, 0xa3, 0x94, 0xc2, 0xdb, 0x0c, 0xf7, 0x1c, 0x62, 0xf1, 0x14,
+ 0x08, 0x70, 0xd6, 0x66, 0xf3, 0x6a, 0x52, 0xa2, 0xf3, 0xcb, 0x89, 0x3d, 0x99, 0x50, 0xcb, 0xf7,
+ 0xc2, 0x79, 0x0d, 0xf0, 0x46, 0x00, 0xe3, 0x77, 0x60, 0xd3, 0x77, 0x89, 0x61, 0x26, 0xb8, 0x59,
+ 0xce, 0x45, 0xa1, 0x20, 0x22, 0x1f, 0xc2, 0xb5, 0xd0, 0xae, 0x4e, 0x7d, 0xa2, 0x8d, 0xa9, 0x3e,
+ 0x53, 0xca, 0xf3, 0xf7, 0xd9, 0xab, 0x01, 0xa1, 0x19, 0xc8, 0x43, 0xdd, 0xea, 0x77, 0x29, 0xd8,
+ 0x0c, 0xaf, 0x53, 0x7a, 0x14, 0xac, 0x53, 0x00, 0x62, 0x59, 0xb6, 0x1f, 0x0f, 0xd7, 0x62, 0x2a,
+ 0x2f, 0xe8, 0xd5, 0xea, 0x91, 0x92, 0x12, 0x33, 0xb0, 0x33, 0x01, 0x98, 0x49, 0x56, 0x86, 0xed,
+ 0x3a, 0x94, 0x82, 0xef, 0x23, 0xfc, 0x23, 0x9b, 0xb8, 0x80, 0x83, 0x80, 0xd8, 0xbd, 0x0b, 0x6f,
+ 0x43, 0xee, 0x9c, 0x8e, 0x0c, 0x2b, 0x78, 0xf5, 0x14, 0x8d, 0xf0, 0x25, 0x37, 0x1b, 0xbd, 0xe4,
+ 0x1e, 0xfd, 0x21, 0x05, 0x5b, 0x9a, 0x3d, 0x99, 0xf7, 0xf7, 0x08, 0xcd, 0xbd, 0x02, 0x78, 0x9f,
+ 0xa4, 0xbe, 0xf8, 0x78, 0x64, 0xf8, 0xe3, 0xe9, 0x79, 0x4d, 0xb3, 0x27, 0xfb, 0x23, 0xdb, 0x24,
+ 0xd6, 0x68, 0xf6, 0x95, 0x90, 0xff, 0xd1, 0xde, 0x1d, 0x51, 0xeb, 0xdd, 0x91, 0x1d, 0xfb, 0x66,
+ 0xf8, 0xd1, 0xec, 0xef, 0xd7, 0xe9, 0xcc, 0x71, 0xef, 0xe8, 0xaf, 0xe9, 0x9d, 0x63, 0xd1, 0x57,
+ 0x2f, 0x8c, 0x8d, 0x42, 0x87, 0x26, 0xd5, 0xd8, 0x78, 0xff, 0x17, 0x00, 0x00, 0xff, 0xff, 0x0c,
+ 0xab, 0xb6, 0x37, 0x7e, 0x1c, 0x00, 0x00,
}
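The byte arrays swapped above are the gzip-compressed serialization of this file's own FileDescriptorProto, which generated .pb.go code typically registers via proto.RegisterFile in its init(). As a rough illustration of what those bytes contain -- a sketch, not part of this change, assuming the descriptor is registered under the name "google/protobuf/descriptor.proto" as in the other regenerated files below -- the embedded descriptor can be recovered and inspected at runtime with the golang/protobuf v1 API:

    package main

    import (
        "bytes"
        "compress/gzip"
        "fmt"
        "io/ioutil"
        "log"

        "github.com/golang/protobuf/proto"
        descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
    )

    func main() {
        // proto.FileDescriptor returns the gzipped FileDescriptorProto that the
        // generated code registered under this file name.
        gz := proto.FileDescriptor("google/protobuf/descriptor.proto")
        if gz == nil {
            log.Fatal("descriptor not registered")
        }
        zr, err := gzip.NewReader(bytes.NewReader(gz))
        if err != nil {
            log.Fatal(err)
        }
        raw, err := ioutil.ReadAll(zr)
        if err != nil {
            log.Fatal(err)
        }
        fd := new(descpb.FileDescriptorProto)
        if err := proto.Unmarshal(raw, fd); err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s defines %d top-level messages\n", fd.GetName(), len(fd.GetMessageType()))
    }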
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
new file mode 100644
index 0000000..4d4fb37
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto
@@ -0,0 +1,849 @@
+// Protocol Buffers - Google's data interchange format
+// Copyright 2008 Google Inc. All rights reserved.
+// https://developers.google.com/protocol-buffers/
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: kenton@google.com (Kenton Varda)
+// Based on original Protocol Buffers design by
+// Sanjay Ghemawat, Jeff Dean, and others.
+//
+// The messages in this file describe the definitions found in .proto files.
+// A valid .proto file can be translated directly to a FileDescriptorProto
+// without any other information (e.g. without reading its imports).
+
+
+syntax = "proto2";
+
+package google.protobuf;
+option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";
+option java_package = "com.google.protobuf";
+option java_outer_classname = "DescriptorProtos";
+option csharp_namespace = "Google.Protobuf.Reflection";
+option objc_class_prefix = "GPB";
+
+// descriptor.proto must be optimized for speed because reflection-based
+// algorithms don't work during bootstrapping.
+option optimize_for = SPEED;
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+message FileDescriptorSet {
+ repeated FileDescriptorProto file = 1;
+}
+
+// Describes a complete .proto file.
+message FileDescriptorProto {
+ optional string name = 1; // file name, relative to root of source tree
+ optional string package = 2; // e.g. "foo", "foo.bar", etc.
+
+ // Names of files imported by this file.
+ repeated string dependency = 3;
+ // Indexes of the public imported files in the dependency list above.
+ repeated int32 public_dependency = 10;
+ // Indexes of the weak imported files in the dependency list.
+ // For Google-internal migration only. Do not use.
+ repeated int32 weak_dependency = 11;
+
+ // All top-level definitions in this file.
+ repeated DescriptorProto message_type = 4;
+ repeated EnumDescriptorProto enum_type = 5;
+ repeated ServiceDescriptorProto service = 6;
+ repeated FieldDescriptorProto extension = 7;
+
+ optional FileOptions options = 8;
+
+ // This field contains optional information about the original source code.
+ // You may safely remove this entire field without harming runtime
+ // functionality of the descriptors -- the information is needed only by
+ // development tools.
+ optional SourceCodeInfo source_code_info = 9;
+
+ // The syntax of the proto file.
+ // The supported values are "proto2" and "proto3".
+ optional string syntax = 12;
+}
+
+// Describes a message type.
+message DescriptorProto {
+ optional string name = 1;
+
+ repeated FieldDescriptorProto field = 2;
+ repeated FieldDescriptorProto extension = 6;
+
+ repeated DescriptorProto nested_type = 3;
+ repeated EnumDescriptorProto enum_type = 4;
+
+ message ExtensionRange {
+ optional int32 start = 1;
+ optional int32 end = 2;
+
+ optional ExtensionRangeOptions options = 3;
+ }
+ repeated ExtensionRange extension_range = 5;
+
+ repeated OneofDescriptorProto oneof_decl = 8;
+
+ optional MessageOptions options = 7;
+
+ // Range of reserved tag numbers. Reserved tag numbers may not be used by
+ // fields or extension ranges in the same message. Reserved ranges may
+ // not overlap.
+ message ReservedRange {
+ optional int32 start = 1; // Inclusive.
+ optional int32 end = 2; // Exclusive.
+ }
+ repeated ReservedRange reserved_range = 9;
+ // Reserved field names, which may not be used by fields in the same message.
+ // A given name may only be reserved once.
+ repeated string reserved_name = 10;
+}
+
+message ExtensionRangeOptions {
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+// Describes a field within a message.
+message FieldDescriptorProto {
+ enum Type {
+ // 0 is reserved for errors.
+ // Order is weird for historical reasons.
+ TYPE_DOUBLE = 1;
+ TYPE_FLOAT = 2;
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
+ // negative values are likely.
+ TYPE_INT64 = 3;
+ TYPE_UINT64 = 4;
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
+ // negative values are likely.
+ TYPE_INT32 = 5;
+ TYPE_FIXED64 = 6;
+ TYPE_FIXED32 = 7;
+ TYPE_BOOL = 8;
+ TYPE_STRING = 9;
+ // Tag-delimited aggregate.
+ // Group type is deprecated and not supported in proto3. However, Proto3
+ // implementations should still be able to parse the group wire format and
+ // treat group fields as unknown fields.
+ TYPE_GROUP = 10;
+ TYPE_MESSAGE = 11; // Length-delimited aggregate.
+
+ // New in version 2.
+ TYPE_BYTES = 12;
+ TYPE_UINT32 = 13;
+ TYPE_ENUM = 14;
+ TYPE_SFIXED32 = 15;
+ TYPE_SFIXED64 = 16;
+ TYPE_SINT32 = 17; // Uses ZigZag encoding.
+ TYPE_SINT64 = 18; // Uses ZigZag encoding.
+ };
+
+ enum Label {
+ // 0 is reserved for errors
+ LABEL_OPTIONAL = 1;
+ LABEL_REQUIRED = 2;
+ LABEL_REPEATED = 3;
+ };
+
+ optional string name = 1;
+ optional int32 number = 3;
+ optional Label label = 4;
+
+ // If type_name is set, this need not be set. If both this and type_name
+ // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+ optional Type type = 5;
+
+ // For message and enum types, this is the name of the type. If the name
+ // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
+ // rules are used to find the type (i.e. first the nested types within this
+ // message are searched, then within the parent, on up to the root
+ // namespace).
+ optional string type_name = 6;
+
+ // For extensions, this is the name of the type being extended. It is
+ // resolved in the same manner as type_name.
+ optional string extendee = 2;
+
+ // For numeric types, contains the original text representation of the value.
+ // For booleans, "true" or "false".
+ // For strings, contains the default text contents (not escaped in any way).
+ // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
+ // TODO(kenton): Base-64 encode?
+ optional string default_value = 7;
+
+ // If set, gives the index of a oneof in the containing type's oneof_decl
+ // list. This field is a member of that oneof.
+ optional int32 oneof_index = 9;
+
+ // JSON name of this field. The value is set by the protocol compiler. If the
+ // user has set a "json_name" option on this field, that option's value
+ // will be used. Otherwise, it's deduced from the field's name by converting
+ // it to camelCase.
+ optional string json_name = 10;
+
+ optional FieldOptions options = 8;
+}
+
+// Describes a oneof.
+message OneofDescriptorProto {
+ optional string name = 1;
+ optional OneofOptions options = 2;
+}
+
+// Describes an enum type.
+message EnumDescriptorProto {
+ optional string name = 1;
+
+ repeated EnumValueDescriptorProto value = 2;
+
+ optional EnumOptions options = 3;
+}
+
+// Describes a value within an enum.
+message EnumValueDescriptorProto {
+ optional string name = 1;
+ optional int32 number = 2;
+
+ optional EnumValueOptions options = 3;
+}
+
+// Describes a service.
+message ServiceDescriptorProto {
+ optional string name = 1;
+ repeated MethodDescriptorProto method = 2;
+
+ optional ServiceOptions options = 3;
+}
+
+// Describes a method of a service.
+message MethodDescriptorProto {
+ optional string name = 1;
+
+ // Input and output type names. These are resolved in the same way as
+ // FieldDescriptorProto.type_name, but must refer to a message type.
+ optional string input_type = 2;
+ optional string output_type = 3;
+
+ optional MethodOptions options = 4;
+
+ // Identifies whether the client streams multiple client messages.
+ optional bool client_streaming = 5 [default=false];
+ // Identifies whether the server streams multiple server messages.
+ optional bool server_streaming = 6 [default=false];
+}
+
+
+// ===================================================================
+// Options
+
+// Each of the definitions above may have "options" attached. These are
+// just annotations which may cause code to be generated slightly differently
+// or may contain hints for code that manipulates protocol messages.
+//
+// Clients may define custom options as extensions of the *Options messages.
+// These extensions may not yet be known at parsing time, so the parser cannot
+// store the values in them. Instead it stores them in a field in the *Options
+// message called uninterpreted_option. This field must have the same name
+// across all *Options messages. We then use this field to populate the
+// extensions when we build a descriptor, at which point all protos have been
+// parsed and so all extensions are known.
+//
+// Extension numbers for custom options may be chosen as follows:
+// * For options which will only be used within a single application or
+// organization, or for experimental options, use field numbers 50000
+// through 99999. It is up to you to ensure that you do not use the
+// same number for multiple options.
+// * For options which will be published and used publicly by multiple
+// independent entities, e-mail protobuf-global-extension-registry@google.com
+// to reserve extension numbers. Simply provide your project name (e.g.
+// Objective-C plugin) and your project website (if available) -- there's no
+// need to explain how you intend to use them. Usually you only need one
+// extension number. You can declare multiple options with only one extension
+// number by putting them in a sub-message. See the Custom Options section of
+// the docs for examples:
+// https://developers.google.com/protocol-buffers/docs/proto#options
+// If this turns out to be popular, a web service will be set up
+// to automatically assign option numbers.
+
+
+message FileOptions {
+
+ // Sets the Java package where classes generated from this .proto will be
+ // placed. By default, the proto package is used, but this is often
+ // inappropriate because proto packages do not normally start with backwards
+ // domain names.
+ optional string java_package = 1;
+
+
+ // If set, all the classes from the .proto file are wrapped in a single
+ // outer class with the given name. This applies to both Proto1
+ // (equivalent to the old "--one_java_file" option) and Proto2 (where
+ // a .proto always translates to a single class, but you may want to
+ // explicitly choose the class name).
+ optional string java_outer_classname = 8;
+
+ // If set true, then the Java code generator will generate a separate .java
+ // file for each top-level message, enum, and service defined in the .proto
+ // file. Thus, these types will *not* be nested inside the outer class
+ // named by java_outer_classname. However, the outer class will still be
+ // generated to contain the file's getDescriptor() method as well as any
+ // top-level extensions defined in the file.
+ optional bool java_multiple_files = 10 [default=false];
+
+ // This option does nothing.
+ optional bool java_generate_equals_and_hash = 20 [deprecated=true];
+
+ // If set true, then the Java2 code generator will generate code that
+ // throws an exception whenever an attempt is made to assign a non-UTF-8
+ // byte sequence to a string field.
+ // Message reflection will do the same.
+ // However, an extension field still accepts non-UTF-8 byte sequences.
+ // This option has no effect when used with the lite runtime.
+ optional bool java_string_check_utf8 = 27 [default=false];
+
+
+ // Generated classes can be optimized for speed or code size.
+ enum OptimizeMode {
+ SPEED = 1; // Generate complete code for parsing, serialization,
+ // etc.
+ CODE_SIZE = 2; // Use ReflectionOps to implement these methods.
+ LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime.
+ }
+ optional OptimizeMode optimize_for = 9 [default=SPEED];
+
+ // Sets the Go package where structs generated from this .proto will be
+ // placed. If omitted, the Go package will be derived from the following:
+ // - The basename of the package import path, if provided.
+ // - Otherwise, the package statement in the .proto file, if present.
+ // - Otherwise, the basename of the .proto file, without extension.
+ optional string go_package = 11;
+
+
+
+ // Should generic services be generated in each language? "Generic" services
+ // are not specific to any particular RPC system. They are generated by the
+ // main code generators in each language (without additional plugins).
+ // Generic services were the only kind of service generation supported by
+ // early versions of google.protobuf.
+ //
+ // Generic services are now considered deprecated in favor of using plugins
+ // that generate code specific to your particular RPC system. Therefore,
+ // these default to false. Old code which depends on generic services should
+ // explicitly set them to true.
+ optional bool cc_generic_services = 16 [default=false];
+ optional bool java_generic_services = 17 [default=false];
+ optional bool py_generic_services = 18 [default=false];
+ optional bool php_generic_services = 42 [default=false];
+
+ // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for everything in the file, or it will be completely ignored; at the very
+ // least, this is a formalization for deprecating files.
+ optional bool deprecated = 23 [default=false];
+
+ // Enables the use of arenas for the proto messages in this file. This applies
+ // only to generated classes for C++.
+ optional bool cc_enable_arenas = 31 [default=false];
+
+
+ // Sets the objective c class prefix which is prepended to all objective c
+ // generated classes from this .proto. There is no default.
+ optional string objc_class_prefix = 36;
+
+ // Namespace for generated classes; defaults to the package.
+ optional string csharp_namespace = 37;
+
+ // By default Swift generators will take the proto package and CamelCase it
+ // replacing '.' with underscore and use that to prefix the types/symbols
+ // defined. When this option is provided, they will use this value instead
+ // to prefix the types/symbols defined.
+ optional string swift_prefix = 39;
+
+ // Sets the php class prefix which is prepended to all php generated classes
+ // from this .proto. Default is empty.
+ optional string php_class_prefix = 40;
+
+ // Use this option to change the namespace of php generated classes. Default
+ // is empty. When this option is empty, the package name will be used for
+ // determining the namespace.
+ optional string php_namespace = 41;
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+
+ reserved 38;
+}
+
+message MessageOptions {
+ // Set true to use the old proto1 MessageSet wire format for extensions.
+ // This is provided for backwards-compatibility with the MessageSet wire
+ // format. You should not use this for any other reason: It's less
+ // efficient, has fewer features, and is more complicated.
+ //
+ // The message must be defined exactly as follows:
+ // message Foo {
+ // option message_set_wire_format = true;
+ // extensions 4 to max;
+ // }
+ // Note that the message cannot have any defined fields; MessageSets only
+ // have extensions.
+ //
+ // All extensions of your type must be singular messages; e.g. they cannot
+ // be int32s, enums, or repeated messages.
+ //
+ // Because this is an option, the above two restrictions are not enforced by
+ // the protocol compiler.
+ optional bool message_set_wire_format = 1 [default=false];
+
+ // Disables the generation of the standard "descriptor()" accessor, which can
+ // conflict with a field of the same name. This is meant to make migration
+ // from proto1 easier; new code should avoid fields named "descriptor".
+ optional bool no_standard_descriptor_accessor = 2 [default=false];
+
+ // Is this message deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the message, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating messages.
+ optional bool deprecated = 3 [default=false];
+
+ // Whether the message is an automatically generated map entry type for the
+ // maps field.
+ //
+ // For maps fields:
+ // map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ // message MapFieldEntry {
+ // option map_entry = true;
+ // optional KeyType key = 1;
+ // optional ValueType value = 2;
+ // }
+ // repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ optional bool map_entry = 7;
+
+ reserved 8; // javalite_serializable
+ reserved 9; // javanano_as_lite
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message FieldOptions {
+ // The ctype option instructs the C++ code generator to use a different
+ // representation of the field than it normally would. See the specific
+ // options below. This option is not yet implemented in the open source
+ // release -- sorry, we'll try to include it in a future version!
+ optional CType ctype = 1 [default = STRING];
+ enum CType {
+ // Default mode.
+ STRING = 0;
+
+ CORD = 1;
+
+ STRING_PIECE = 2;
+ }
+ // The packed option can be enabled for repeated primitive fields to enable
+ // a more efficient representation on the wire. Rather than repeatedly
+ // writing the tag and type for each element, the entire array is encoded as
+ // a single length-delimited blob. In proto3, only explicitly setting it to
+ // false will avoid using packed encoding.
+ optional bool packed = 2;
+
+ // The jstype option determines the JavaScript type used for values of the
+ // field. The option is permitted only for 64 bit integral and fixed types
+ // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
+ // is represented as a JavaScript string, which avoids loss of precision that
+ // can happen when a large value is converted to a floating-point JavaScript number.
+ // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+ // use the JavaScript "number" type. The behavior of the default option
+ // JS_NORMAL is implementation dependent.
+ //
+ // This option is an enum to permit additional types to be added, e.g.
+ // goog.math.Integer.
+ optional JSType jstype = 6 [default = JS_NORMAL];
+ enum JSType {
+ // Use the default type.
+ JS_NORMAL = 0;
+
+ // Use JavaScript strings.
+ JS_STRING = 1;
+
+ // Use JavaScript numbers.
+ JS_NUMBER = 2;
+ }
+
+ // Should this field be parsed lazily? Lazy applies only to message-type
+ // fields. It means that when the outer message is initially parsed, the
+ // inner message's contents will not be parsed but instead stored in encoded
+ // form. The inner message will actually be parsed when it is first accessed.
+ //
+ // This is only a hint. Implementations are free to choose whether to use
+ // eager or lazy parsing regardless of the value of this option. However,
+ // setting this option true suggests that the protocol author believes that
+ // using lazy parsing on this field is worth the additional bookkeeping
+ // overhead typically needed to implement it.
+ //
+ // This option does not affect the public interface of any generated code;
+ // all method signatures remain the same. Furthermore, thread-safety of the
+ // interface is not affected by this option; const methods remain safe to
+ // call from multiple threads concurrently, while non-const methods continue
+ // to require exclusive access.
+ //
+ //
+ // Note that implementations may choose not to check required fields within
+ // a lazy sub-message. That is, calling IsInitialized() on the outer message
+ // may return true even if the inner message has missing required fields.
+ // This is necessary because otherwise the inner message would have to be
+ // parsed in order to perform the check, defeating the purpose of lazy
+ // parsing. An implementation which chooses not to check required fields
+ // must be consistent about it. That is, for any particular sub-message, the
+ // implementation must either *always* check its required fields, or *never*
+ // check its required fields, regardless of whether or not the message has
+ // been parsed.
+ optional bool lazy = 5 [default=false];
+
+ // Is this field deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for accessors, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating fields.
+ optional bool deprecated = 3 [default=false];
+
+ // For Google-internal migration only. Do not use.
+ optional bool weak = 10 [default=false];
+
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+
+ reserved 4; // removed jtype
+}
+
+message OneofOptions {
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message EnumOptions {
+
+ // Set this option to true to allow mapping different tag names to the same
+ // value.
+ optional bool allow_alias = 2;
+
+ // Is this enum deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum, or it will be completely ignored; at the very least, this
+ // is a formalization for deprecating enums.
+ optional bool deprecated = 3 [default=false];
+
+ reserved 5; // javanano_as_lite
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message EnumValueOptions {
+ // Is this enum value deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum value, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating enum values.
+ optional bool deprecated = 1 [default=false];
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message ServiceOptions {
+
+ // Note: Field numbers 1 through 32 are reserved for Google's internal RPC
+ // framework. We apologize for hoarding these numbers to ourselves, but
+ // we were already using them long before we decided to release Protocol
+ // Buffers.
+
+ // Is this service deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the service, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating services.
+ optional bool deprecated = 33 [default=false];
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+message MethodOptions {
+
+ // Note: Field numbers 1 through 32 are reserved for Google's internal RPC
+ // framework. We apologize for hoarding these numbers to ourselves, but
+ // we were already using them long before we decided to release Protocol
+ // Buffers.
+
+ // Is this method deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the method, or it will be completely ignored; at the very least,
+ // this is a formalization for deprecating methods.
+ optional bool deprecated = 33 [default=false];
+
+ // Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+ // or neither? An HTTP-based RPC implementation may choose the GET verb for
+ // safe methods, and the PUT verb for idempotent methods, instead of the default POST.
+ enum IdempotencyLevel {
+ IDEMPOTENCY_UNKNOWN = 0;
+ NO_SIDE_EFFECTS = 1; // implies idempotent
+ IDEMPOTENT = 2; // idempotent, but may have side effects
+ }
+ optional IdempotencyLevel idempotency_level =
+ 34 [default=IDEMPOTENCY_UNKNOWN];
+
+ // The parser stores options it doesn't recognize here. See above.
+ repeated UninterpretedOption uninterpreted_option = 999;
+
+ // Clients can define custom options in extensions of this message. See above.
+ extensions 1000 to max;
+}
+
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+message UninterpretedOption {
+ // The name of the uninterpreted option. Each string represents a segment in
+ // a dot-separated name. is_extension is true iff a segment represents an
+ // extension (denoted with parentheses in options specs in .proto files).
+ // E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
+ // "foo.(bar.baz).qux".
+ message NamePart {
+ required string name_part = 1;
+ required bool is_extension = 2;
+ }
+ repeated NamePart name = 2;
+
+ // The value of the uninterpreted option, in whatever type the tokenizer
+ // identified it as during parsing. Exactly one of these should be set.
+ optional string identifier_value = 3;
+ optional uint64 positive_int_value = 4;
+ optional int64 negative_int_value = 5;
+ optional double double_value = 6;
+ optional bytes string_value = 7;
+ optional string aggregate_value = 8;
+}
+
+// ===================================================================
+// Optional source code info
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+message SourceCodeInfo {
+ // A Location identifies a piece of source code in a .proto file which
+ // corresponds to a particular definition. This information is intended
+ // to be useful to IDEs, code indexers, documentation generators, and similar
+ // tools.
+ //
+ // For example, say we have a file like:
+ // message Foo {
+ // optional string foo = 1;
+ // }
+ // Let's look at just the field definition:
+ // optional string foo = 1;
+ // ^       ^^     ^^  ^  ^^^
+ // a       bc     de  f  ghi
+ // We have the following locations:
+ //   span   path                represents
+ //   [a,i)  [ 4, 0, 2, 0 ]      The whole field definition.
+ //   [a,b)  [ 4, 0, 2, 0, 4 ]   The label (optional).
+ //   [c,d)  [ 4, 0, 2, 0, 5 ]   The type (string).
+ //   [e,f)  [ 4, 0, 2, 0, 1 ]   The name (foo).
+ //   [g,h)  [ 4, 0, 2, 0, 3 ]   The number (1).
+ //
+ // Notes:
+ // - A location may refer to a repeated field itself (i.e. not to any
+ // particular index within it). This is used whenever a set of elements is
+ // logically enclosed in a single code segment. For example, an entire
+ // extend block (possibly containing multiple extension definitions) will
+ // have an outer location whose path refers to the "extensions" repeated
+ // field without an index.
+ // - Multiple locations may have the same path. This happens when a single
+ // logical declaration is spread out across multiple places. The most
+ // obvious example is the "extend" block again -- there may be multiple
+ // extend blocks in the same scope, each of which will have the same path.
+ // - A location's span is not always a subset of its parent's span. For
+ // example, the "extendee" of an extension declaration appears at the
+ // beginning of the "extend" block and is shared by all extensions within
+ // the block.
+ // - Just because a location's span is a subset of some other location's span
+ // does not mean that it is a descendant. For example, a "group" defines
+ // both a type and a field in a single declaration. Thus, the locations
+ // corresponding to the type and field and their components will overlap.
+ // - Code which tries to interpret locations should probably be designed to
+ // ignore those that it doesn't understand, as more types of locations could
+ // be recorded in the future.
+ repeated Location location = 1;
+ message Location {
+ // Identifies which part of the FileDescriptorProto was defined at this
+ // location.
+ //
+ // Each element is a field number or an index. They form a path from
+ // the root FileDescriptorProto to the place where the definition appears. For
+ // example, this path:
+ // [ 4, 3, 2, 7, 1 ]
+ // refers to:
+ //   file.message_type(3)  // 4, 3
+ //       .field(7)         // 2, 7
+ //       .name()           // 1
+ // This is because FileDescriptorProto.message_type has field number 4:
+ // repeated DescriptorProto message_type = 4;
+ // and DescriptorProto.field has field number 2:
+ // repeated FieldDescriptorProto field = 2;
+ // and FieldDescriptorProto.name has field number 1:
+ // optional string name = 1;
+ //
+ // Thus, the above path gives the location of a field name. If we removed
+ // the last element:
+ // [ 4, 3, 2, 7 ]
+ // this path refers to the whole field declaration (from the beginning
+ // of the label to the terminating semicolon).
+ repeated int32 path = 1 [packed=true];
+
+ // Always has exactly three or four elements: start line, start column,
+ // end line (optional, otherwise assumed same as start line), end column.
+ // These are packed into a single field for efficiency. Note that line
+ // and column numbers are zero-based -- typically you will want to add
+ // 1 to each before displaying to a user.
+ repeated int32 span = 2 [packed=true];
+
+ // If this SourceCodeInfo represents a complete declaration, these are any
+ // comments appearing before and after the declaration which appear to be
+ // attached to the declaration.
+ //
+ // A series of line comments appearing on consecutive lines, with no other
+ // tokens appearing on those lines, will be treated as a single comment.
+ //
+ // leading_detached_comments will keep paragraphs of comments that appear
+ // before (but not connected to) the current element. Each paragraph,
+ // separated by empty lines, will be one comment element in the repeated
+ // field.
+ //
+ // Only the comment content is provided; comment markers (e.g. //) are
+ // stripped out. For block comments, leading whitespace and an asterisk
+ // will be stripped from the beginning of each line other than the first.
+ // Newlines are included in the output.
+ //
+ // Examples:
+ //
+ // optional int32 foo = 1; // Comment attached to foo.
+ // // Comment attached to bar.
+ // optional int32 bar = 2;
+ //
+ // optional string baz = 3;
+ // // Comment attached to baz.
+ // // Another line attached to baz.
+ //
+ // // Comment attached to qux.
+ // //
+ // // Another line attached to qux.
+ // optional double qux = 4;
+ //
+ // // Detached comment for corge. This is not leading or trailing comments
+ // // to qux or corge because there are blank lines separating it from
+ // // both.
+ //
+ // // Detached comment for corge paragraph 2.
+ //
+ // optional string corge = 5;
+ // /* Block comment attached
+ // * to corge. Leading asterisks
+ // * will be removed. */
+ // /* Block comment attached to
+ // * grault. */
+ // optional int32 grault = 6;
+ //
+ // // ignored detached comments.
+ optional string leading_comments = 3;
+ optional string trailing_comments = 4;
+ repeated string leading_detached_comments = 6;
+ }
+}
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+message GeneratedCodeInfo {
+ // An Annotation connects some span of text in generated code to an element
+ // of its generating .proto file.
+ repeated Annotation annotation = 1;
+ message Annotation {
+ // Identifies the element in the original source .proto file. This field
+ // is formatted the same as SourceCodeInfo.Location.path.
+ repeated int32 path = 1 [packed=true];
+
+ // Identifies the filesystem path to the original source .proto.
+ optional string source_file = 2;
+
+ // Identifies the starting offset in bytes in the generated code
+ // that relates to the identified object.
+ optional int32 begin = 3;
+
+ // Identifies the ending offset in bytes in the generated code that
+ // relates to the identified offset. The end offset should be one past
+ // the last relevant byte (so the length of the text = end - begin).
+ optional int32 end = 4;
+ }
+}
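The SourceCodeInfo and Location messages above describe how descriptor paths map to source spans and comments. As a rough sketch of how a tool might consume that data -- not part of this change, with a hypothetical package name, and assuming a *FileDescriptorProto obtained elsewhere (e.g. as in the earlier sketch) -- leading comments can be looked up by path:

    package descutil

    import (
        descpb "github.com/golang/protobuf/protoc-gen-go/descriptor"
    )

    // LeadingComments returns the leading comment attached to the element at the
    // given path (e.g. []int32{4, 0, 2, 1} addresses the second field of the first
    // message), or "" if the file carries no source info for that path.
    func LeadingComments(fd *descpb.FileDescriptorProto, path []int32) string {
        for _, loc := range fd.GetSourceCodeInfo().GetLocation() {
            got := loc.GetPath()
            if len(got) != len(path) {
                continue
            }
            match := true
            for i := range got {
                if got[i] != path[i] {
                    match = false
                    break
                }
            }
            if match {
                return loc.GetLeadingComments()
            }
        }
        return ""
    }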
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
index 89e07ae..b2af97f 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -51,6 +51,9 @@ const googleApis = "type.googleapis.com/"
// function. AnyMessageName is provided for less common use cases like filtering a
// sequence of Any messages based on a set of allowed message type names.
func AnyMessageName(any *any.Any) (string, error) {
+ if any == nil {
+ return "", fmt.Errorf("message is nil")
+ }
slash := strings.LastIndex(any.TypeUrl, "/")
if slash < 0 {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
index f2c6906..f346017 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -1,12 +1,11 @@
-// Code generated by protoc-gen-go.
-// source: github.com/golang/protobuf/ptypes/any/any.proto
-// DO NOT EDIT!
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/any.proto
/*
Package any is a generated protocol buffer package.
It is generated from these files:
- github.com/golang/protobuf/ptypes/any/any.proto
+ google/protobuf/any.proto
It has these top-level messages:
Any
@@ -63,6 +62,16 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// any.Unpack(foo)
// ...
//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
@@ -132,24 +141,38 @@ func (*Any) ProtoMessage() {}
func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Any) XXX_WellKnownType() string { return "Any" }
+func (m *Any) GetTypeUrl() string {
+ if m != nil {
+ return m.TypeUrl
+ }
+ return ""
+}
+
+func (m *Any) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
func init() {
proto.RegisterType((*Any)(nil), "google.protobuf.Any")
}
-func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) }
+func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
- // 187 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9,
- 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
- 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc,
- 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c,
- 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69,
- 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24,
- 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1,
- 0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19,
- 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd,
- 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9,
- 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00,
- 0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00,
+ // 185 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4,
+ 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a,
+ 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46,
+ 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7,
+ 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xca, 0xe7, 0x12, 0x4e, 0xce,
+ 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52,
+ 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc,
+ 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c,
+ 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce,
+ 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff,
+ 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00,
}
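
The GetTypeUrl and GetValue accessors added in this regeneration follow protoc-gen-go's nil-receiver convention: each method body checks m != nil, so calling a getter on a nil *Any yields the zero value rather than a panic. A two-line illustration (anypb is assumed to alias this package):

var a *anypb.Any   // nil message
_ = a.GetTypeUrl() // returns "" instead of causing a nil-pointer panic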
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
index 81dcf46..c748667 100644
--- a/vendor/github.com/golang/protobuf/ptypes/any/any.proto
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.proto
@@ -37,7 +37,6 @@ option go_package = "github.com/golang/protobuf/ptypes/any";
option java_package = "com.google.protobuf";
option java_outer_classname = "AnyProto";
option java_multiple_files = true;
-option java_generate_equals_and_hash = true;
option objc_class_prefix = "GPB";
// `Any` contains an arbitrary serialized protocol buffer message along with a
@@ -75,6 +74,16 @@ option objc_class_prefix = "GPB";
// any.Unpack(foo)
// ...
//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
// The pack methods provided by protobuf library will by default use
// 'type.googleapis.com/full.type.name' as the type URL and the unpack
// methods only use the fully qualified type name after the last '/'
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
index 5697483..b2410a0 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -1,12 +1,11 @@
-// Code generated by protoc-gen-go.
-// source: github.com/golang/protobuf/ptypes/duration/duration.proto
-// DO NOT EDIT!
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/duration.proto
/*
Package duration is a generated protocol buffer package.
It is generated from these files:
- github.com/golang/protobuf/ptypes/duration/duration.proto
+ google/protobuf/duration.proto
It has these top-level messages:
Duration
@@ -35,6 +34,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
+// # Examples
+//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
@@ -69,10 +70,27 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// end.nanos -= 1000000000;
// }
//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
//
type Duration struct {
// Signed seconds of the span of time. Must be from -315,576,000,000
- // to +315,576,000,000 inclusive.
+ // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Signed fractions of a second at nanosecond resolution of the span
// of time. Durations less than one second are represented with a 0
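
The new comment text above covers both a Python conversion and the JSON string form of Duration. A hedged Go sketch of the same two ideas, using the ptypes conversion helpers that live alongside this file and assuming the jsonpb package is available for the JSON mapping:

package main

import (
    "fmt"
    "time"

    "github.com/golang/protobuf/jsonpb"
    "github.com/golang/protobuf/ptypes"
)

func main() {
    // time.Duration -> google.protobuf.Duration (the Go analogue of FromTimedelta).
    d := ptypes.DurationProto(3*time.Second + time.Nanosecond)

    // google.protobuf.Duration -> time.Duration, with range validation.
    goDur, err := ptypes.Duration(d)
    fmt.Println(goDur, err) // 3.000000001s <nil>

    // JSON mapping: a string of seconds ending in "s", as described above.
    s, _ := (&jsonpb.Marshaler{}).MarshalToString(d)
    fmt.Println(s) // "3.000000001s"
}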
@@ -89,26 +107,38 @@ func (*Duration) ProtoMessage() {}
func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Duration) XXX_WellKnownType() string { return "Duration" }
-func init() {
- proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
+func (m *Duration) GetSeconds() int64 {
+ if m != nil {
+ return m.Seconds
+ }
+ return 0
+}
+
+func (m *Duration) GetNanos() int32 {
+ if m != nil {
+ return m.Nanos
+ }
+ return 0
}
func init() {
- proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0)
+ proto.RegisterType((*Duration)(nil), "google.protobuf.Duration")
}
+func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) }
+
var fileDescriptor0 = []byte{
- // 189 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9,
- 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
- 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29,
- 0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3,
- 0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8,
- 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60,
- 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6,
- 0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98,
- 0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13,
- 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9,
- 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01,
- 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00,
+ // 190 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a,
+ 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56,
+ 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5,
+ 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e,
+ 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c,
+ 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56,
+ 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e,
+ 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4,
+ 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78,
+ 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63,
+ 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00,
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
index 96c1796..975fce4 100644
--- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto
@@ -33,11 +33,11 @@ syntax = "proto3";
package google.protobuf;
option csharp_namespace = "Google.Protobuf.WellKnownTypes";
+option cc_enable_arenas = true;
option go_package = "github.com/golang/protobuf/ptypes/duration";
option java_package = "com.google.protobuf";
option java_outer_classname = "DurationProto";
option java_multiple_files = true;
-option java_generate_equals_and_hash = true;
option objc_class_prefix = "GPB";
// A Duration represents a signed, fixed-length span of time represented
@@ -47,6 +47,8 @@ option objc_class_prefix = "GPB";
// two Timestamp values is a Duration and it can be added or subtracted
// from a Timestamp. Range is approximately +-10,000 years.
//
+// # Examples
+//
// Example 1: Compute Duration from two Timestamps in pseudo code.
//
// Timestamp start = ...;
@@ -81,11 +83,28 @@ option objc_class_prefix = "GPB";
// end.nanos -= 1000000000;
// }
//
+// Example 3: Compute Duration from datetime.timedelta in Python.
+//
+// td = datetime.timedelta(days=3, minutes=10)
+// duration = Duration()
+// duration.FromTimedelta(td)
+//
+// # JSON Mapping
+//
+// In JSON format, the Duration type is encoded as a string rather than an
+// object, where the string ends in the suffix "s" (indicating seconds) and
+// is preceded by the number of seconds, with nanoseconds expressed as
+// fractional seconds. For example, 3 seconds with 0 nanoseconds should be
+// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should
+// be expressed in JSON format as "3.000000001s", and 3 seconds and 1
+// microsecond should be expressed in JSON format as "3.000001s".
+//
//
message Duration {
// Signed seconds of the span of time. Must be from -315,576,000,000
- // to +315,576,000,000 inclusive.
+ // to +315,576,000,000 inclusive. Note: these bounds are computed from:
+ // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years
int64 seconds = 1;
// Signed fractions of a second at nanosecond resolution of the span
diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh
index 2a5b4e8..b50a941 100755
--- a/vendor/github.com/golang/protobuf/ptypes/regen.sh
+++ b/vendor/github.com/golang/protobuf/ptypes/regen.sh
@@ -8,14 +8,7 @@
PKG=github.com/golang/protobuf/ptypes
UPSTREAM=https://github.com/google/protobuf
UPSTREAM_SUBDIR=src/google/protobuf
-PROTO_FILES='
- any.proto
- duration.proto
- empty.proto
- struct.proto
- timestamp.proto
- wrappers.proto
-'
+PROTO_FILES=(any duration empty struct timestamp wrappers)
function die() {
echo 1>&2 $*
@@ -36,31 +29,15 @@ pkgdir=$(go list -f '{{.Dir}}' $PKG)
echo 1>&2 $pkgdir
base=$(echo $pkgdir | sed "s,/$PKG\$,,")
echo 1>&2 "base: $base"
-cd $base
+cd "$base"
echo 1>&2 "fetching latest protos... "
git clone -q $UPSTREAM $tmpdir
-# Pass 1: build mapping from upstream filename to our filename.
-declare -A filename_map
-for f in $(cd $PKG && find * -name '*.proto'); do
- echo -n 1>&2 "looking for latest version of $f... "
- up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/)
- echo 1>&2 $up
- if [ $(echo $up | wc -w) != "1" ]; then
- die "not exactly one match"
- fi
- filename_map[$up]=$f
-done
-# Pass 2: copy files
-for up in "${!filename_map[@]}"; do
- f=${filename_map[$up]}
- shortname=$(basename $f | sed 's,\.proto$,,')
- cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f
-done
-# Run protoc once per package.
-for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do
- echo 1>&2 "* $dir"
- protoc --go_out=. $dir/*.proto
+for file in ${PROTO_FILES[@]}; do
+ echo 1>&2 "* $file"
+ protoc --go_out=. -I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die
+ cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file
done
+
echo 1>&2 "All OK"
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
index 1b36576..47f10db 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -99,6 +99,15 @@ func Timestamp(ts *tspb.Timestamp) (time.Time, error) {
return t, validateTimestamp(ts)
}
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *tspb.Timestamp {
+ ts, err := TimestampProto(time.Now())
+ if err != nil {
+ panic("ptypes: time.Now() out of Timestamp range")
+ }
+ return ts
+}
+
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
func TimestampProto(t time.Time) (*tspb.Timestamp, error) {
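
TimestampNow is a thin convenience wrapper over the existing conversions; the panic branch should be unreachable on a sanely configured clock, since time.Now() falls well inside the representable range. A short usage sketch combining it with Timestamp and TimestampProto:

package main

import (
    "fmt"
    "time"

    "github.com/golang/protobuf/ptypes"
)

func main() {
    // Current time as a google.protobuf.Timestamp.
    now := ptypes.TimestampNow()

    // proto -> time.Time, with validation of the representable range.
    t, err := ptypes.Timestamp(now)
    if err != nil {
        panic(err)
    }

    // time.Time -> proto again, one hour later.
    later, err := ptypes.TimestampProto(t.Add(time.Hour))
    fmt.Println(later.Seconds-now.Seconds, err) // 3600 <nil>
}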
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
index ffcc515..e23e4a2 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -1,12 +1,11 @@
-// Code generated by protoc-gen-go.
-// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
-// DO NOT EDIT!
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/timestamp.proto
/*
Package timestamp is a generated protocol buffer package.
It is generated from these files:
- github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+ google/protobuf/timestamp.proto
It has these top-level messages:
Timestamp
@@ -40,6 +39,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// and from RFC 3339 date strings.
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
//
+// # Examples
+//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
@@ -77,15 +78,36 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
//
// Example 5: Compute Timestamp from current time in Python.
//
-// now = time.time()
-// seconds = int(now)
-// nanos = int((now - seconds) * 10**9)
-// timestamp = Timestamp(seconds=seconds, nanos=nanos)
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required, though only UTC (as indicated by "Z") is presently supported.
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
+// to obtain a formatter capable of generating timestamps in this format.
//
//
type Timestamp struct {
// Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"`
// Non-negative fractions of a second at nanosecond resolution. Negative
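
The JSON mapping paragraph above describes an RFC 3339 timestamp in UTC, which matches the standard library's RFC3339Nano layout; the concrete value from the comment can be reproduced with nothing but the time package:

package main

import (
    "fmt"
    "time"
)

func main() {
    // 15.01 seconds past 01:30 UTC on January 15, 2017.
    t := time.Date(2017, time.January, 15, 1, 30, 15, 10000000, time.UTC)
    fmt.Println(t.Format(time.RFC3339Nano)) // 2017-01-15T01:30:15.01Z
}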
@@ -101,27 +123,38 @@ func (*Timestamp) ProtoMessage() {}
func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" }
-func init() {
- proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
+func (m *Timestamp) GetSeconds() int64 {
+ if m != nil {
+ return m.Seconds
+ }
+ return 0
+}
+
+func (m *Timestamp) GetNanos() int32 {
+ if m != nil {
+ return m.Nanos
+ }
+ return 0
}
func init() {
- proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0)
+ proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp")
}
+func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) }
+
var fileDescriptor0 = []byte{
- // 194 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9,
- 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28,
- 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9,
- 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3,
- 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24,
- 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83,
- 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d,
- 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27,
- 0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1,
- 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03,
- 0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92,
- 0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01,
- 0x00, 0x00,
+ // 191 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f,
+ 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d,
+ 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28,
+ 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5,
+ 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89,
+ 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70,
+ 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51,
+ 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89,
+ 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71,
+ 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a,
+ 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 0x43,
+ 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00,
}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
index 7992a85..b7cbd17 100644
--- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
@@ -38,7 +38,6 @@ option go_package = "github.com/golang/protobuf/ptypes/timestamp";
option java_package = "com.google.protobuf";
option java_outer_classname = "TimestampProto";
option java_multiple_files = true;
-option java_generate_equals_and_hash = true;
option objc_class_prefix = "GPB";
// A Timestamp represents a point in time independent of any time zone
@@ -53,6 +52,8 @@ option objc_class_prefix = "GPB";
// and from RFC 3339 date strings.
// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt).
//
+// # Examples
+//
// Example 1: Compute Timestamp from POSIX `time()`.
//
// Timestamp timestamp;
@@ -90,16 +91,37 @@ option objc_class_prefix = "GPB";
//
// Example 5: Compute Timestamp from current time in Python.
//
-// now = time.time()
-// seconds = int(now)
-// nanos = int((now - seconds) * 10**9)
-// timestamp = Timestamp(seconds=seconds, nanos=nanos)
+// timestamp = Timestamp()
+// timestamp.GetCurrentTime()
+//
+// # JSON Mapping
+//
+// In JSON format, the Timestamp type is encoded as a string in the
+// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the
+// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z"
+// where {year} is always expressed using four digits while {month}, {day},
+// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional
+// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution),
+// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone
+// is required, though only UTC (as indicated by "Z") is presently supported.
+//
+// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past
+// 01:30 UTC on January 15, 2017.
+//
+// In JavaScript, one can convert a Date object to this format using the
+// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString)
+// method. In Python, a standard `datetime.datetime` object can be converted
+// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime)
+// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one
+// can use the Joda Time's [`ISODateTimeFormat.dateTime()`](
+// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime())
+// to obtain a formatter capable of generating timestamps in this format.
//
//
message Timestamp {
// Represents seconds of UTC time since Unix epoch
- // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to
+ // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
// 9999-12-31T23:59:59Z inclusive.
int64 seconds = 1;
diff --git a/vendor/github.com/golang/snappy/AUTHORS b/vendor/github.com/golang/snappy/AUTHORS
new file mode 100644
index 0000000..bcfa195
--- /dev/null
+++ b/vendor/github.com/golang/snappy/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of Snappy-Go authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS files.
+# See the latter for an explanation.
+
+# Names should be added to this file as
+# Name or Organization <email address>
+# The email address is not required for organizations.
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Google Inc.
+Jan Mercl <0xjnml@gmail.com>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/CONTRIBUTORS b/vendor/github.com/golang/snappy/CONTRIBUTORS
new file mode 100644
index 0000000..931ae31
--- /dev/null
+++ b/vendor/github.com/golang/snappy/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This is the official list of people who can contribute
+# (and typically have contributed) code to the Snappy-Go repository.
+# The AUTHORS file lists the copyright holders; this file
+# lists people. For example, Google employees are listed here
+# but not in AUTHORS, because Google holds the copyright.
+#
+# The submission process automatically checks to make sure
+# that people submitting code are listed in this file (by email address).
+#
+# Names should be added to this file only after verifying that
+# the individual or the individual's organization has agreed to
+# the appropriate Contributor License Agreement, found here:
+#
+# http://code.google.com/legal/individual-cla-v1.0.html
+# http://code.google.com/legal/corporate-cla-v1.0.html
+#
+# The agreement for individuals can be filled out on the web.
+#
+# When adding J Random Contributor's name to this file,
+# either J's name or J's organization's name should be
+# added to the AUTHORS file, depending on whether the
+# individual or corporate CLA was used.
+
+# Names should be added to this file like so:
+# Name <email address>
+
+# Please keep the list sorted.
+
+Damian Gryski <dgryski@gmail.com>
+Jan Mercl <0xjnml@gmail.com>
+Kai Backman <kaib@golang.org>
+Marc-Antoine Ruel <maruel@chromium.org>
+Nigel Tao <nigeltao@golang.org>
+Rob Pike <r@golang.org>
+Rodolfo Carvalho <rhcarvalho@gmail.com>
+Russ Cox <rsc@golang.org>
+Sebastien Binet <seb.binet@gmail.com>
diff --git a/vendor/github.com/golang/snappy/LICENSE b/vendor/github.com/golang/snappy/LICENSE
new file mode 100644
index 0000000..6050c10
--- /dev/null
+++ b/vendor/github.com/golang/snappy/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2011 The Snappy-Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/golang/snappy/README b/vendor/github.com/golang/snappy/README
new file mode 100644
index 0000000..cea1287
--- /dev/null
+++ b/vendor/github.com/golang/snappy/README
@@ -0,0 +1,107 @@
+The Snappy compression format in the Go programming language.
+
+To download and install from source:
+$ go get github.com/golang/snappy
+
+Unless otherwise noted, the Snappy-Go source files are distributed
+under the BSD-style license found in the LICENSE file.
+
+
+
+Benchmarks.
+
+The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten
+or so files, the same set used by the C++ Snappy code (github.com/google/snappy
+and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @
+3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29:
+
+"go test -test.bench=."
+
+_UFlat0-8 2.19GB/s ± 0% html
+_UFlat1-8 1.41GB/s ± 0% urls
+_UFlat2-8 23.5GB/s ± 2% jpg
+_UFlat3-8 1.91GB/s ± 0% jpg_200
+_UFlat4-8 14.0GB/s ± 1% pdf
+_UFlat5-8 1.97GB/s ± 0% html4
+_UFlat6-8 814MB/s ± 0% txt1
+_UFlat7-8 785MB/s ± 0% txt2
+_UFlat8-8 857MB/s ± 0% txt3
+_UFlat9-8 719MB/s ± 1% txt4
+_UFlat10-8 2.84GB/s ± 0% pb
+_UFlat11-8 1.05GB/s ± 0% gaviota
+
+_ZFlat0-8 1.04GB/s ± 0% html
+_ZFlat1-8 534MB/s ± 0% urls
+_ZFlat2-8 15.7GB/s ± 1% jpg
+_ZFlat3-8 740MB/s ± 3% jpg_200
+_ZFlat4-8 9.20GB/s ± 1% pdf
+_ZFlat5-8 991MB/s ± 0% html4
+_ZFlat6-8 379MB/s ± 0% txt1
+_ZFlat7-8 352MB/s ± 0% txt2
+_ZFlat8-8 396MB/s ± 1% txt3
+_ZFlat9-8 327MB/s ± 1% txt4
+_ZFlat10-8 1.33GB/s ± 1% pb
+_ZFlat11-8 605MB/s ± 1% gaviota
+
+
+
+"go test -test.bench=. -tags=noasm"
+
+_UFlat0-8 621MB/s ± 2% html
+_UFlat1-8 494MB/s ± 1% urls
+_UFlat2-8 23.2GB/s ± 1% jpg
+_UFlat3-8 1.12GB/s ± 1% jpg_200
+_UFlat4-8 4.35GB/s ± 1% pdf
+_UFlat5-8 609MB/s ± 0% html4
+_UFlat6-8 296MB/s ± 0% txt1
+_UFlat7-8 288MB/s ± 0% txt2
+_UFlat8-8 309MB/s ± 1% txt3
+_UFlat9-8 280MB/s ± 1% txt4
+_UFlat10-8 753MB/s ± 0% pb
+_UFlat11-8 400MB/s ± 0% gaviota
+
+_ZFlat0-8 409MB/s ± 1% html
+_ZFlat1-8 250MB/s ± 1% urls
+_ZFlat2-8 12.3GB/s ± 1% jpg
+_ZFlat3-8 132MB/s ± 0% jpg_200
+_ZFlat4-8 2.92GB/s ± 0% pdf
+_ZFlat5-8 405MB/s ± 1% html4
+_ZFlat6-8 179MB/s ± 1% txt1
+_ZFlat7-8 170MB/s ± 1% txt2
+_ZFlat8-8 189MB/s ± 1% txt3
+_ZFlat9-8 164MB/s ± 1% txt4
+_ZFlat10-8 479MB/s ± 1% pb
+_ZFlat11-8 270MB/s ± 1% gaviota
+
+
+
+For comparison (Go's encoded output is byte-for-byte identical to C++'s), here
+are the numbers from C++ Snappy's
+
+make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log
+
+BM_UFlat/0 2.4GB/s html
+BM_UFlat/1 1.4GB/s urls
+BM_UFlat/2 21.8GB/s jpg
+BM_UFlat/3 1.5GB/s jpg_200
+BM_UFlat/4 13.3GB/s pdf
+BM_UFlat/5 2.1GB/s html4
+BM_UFlat/6 1.0GB/s txt1
+BM_UFlat/7 959.4MB/s txt2
+BM_UFlat/8 1.0GB/s txt3
+BM_UFlat/9 864.5MB/s txt4
+BM_UFlat/10 2.9GB/s pb
+BM_UFlat/11 1.2GB/s gaviota
+
+BM_ZFlat/0 944.3MB/s html (22.31 %)
+BM_ZFlat/1 501.6MB/s urls (47.78 %)
+BM_ZFlat/2 14.3GB/s jpg (99.95 %)
+BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %)
+BM_ZFlat/4 8.3GB/s pdf (83.30 %)
+BM_ZFlat/5 903.5MB/s html4 (22.52 %)
+BM_ZFlat/6 336.0MB/s txt1 (57.88 %)
+BM_ZFlat/7 312.3MB/s txt2 (61.91 %)
+BM_ZFlat/8 353.1MB/s txt3 (54.99 %)
+BM_ZFlat/9 289.9MB/s txt4 (66.26 %)
+BM_ZFlat/10 1.2GB/s pb (19.68 %)
+BM_ZFlat/11 527.4MB/s gaviota (37.72 %)
diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go
new file mode 100644
index 0000000..72efb03
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode.go
@@ -0,0 +1,237 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+var (
+ // ErrCorrupt reports that the input is invalid.
+ ErrCorrupt = errors.New("snappy: corrupt input")
+ // ErrTooLarge reports that the uncompressed length is too large.
+ ErrTooLarge = errors.New("snappy: decoded block is too large")
+ // ErrUnsupported reports that the input isn't supported.
+ ErrUnsupported = errors.New("snappy: unsupported input")
+
+ errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length")
+)
+
+// DecodedLen returns the length of the decoded block.
+func DecodedLen(src []byte) (int, error) {
+ v, _, err := decodedLen(src)
+ return v, err
+}
+
+// decodedLen returns the length of the decoded block and the number of bytes
+// that the length header occupied.
+func decodedLen(src []byte) (blockLen, headerLen int, err error) {
+ v, n := binary.Uvarint(src)
+ if n <= 0 || v > 0xffffffff {
+ return 0, 0, ErrCorrupt
+ }
+
+ const wordSize = 32 << (^uint(0) >> 32 & 1)
+ if wordSize == 32 && v > 0x7fffffff {
+ return 0, 0, ErrTooLarge
+ }
+ return int(v), n, nil
+}
+
+const (
+ decodeErrCodeCorrupt = 1
+ decodeErrCodeUnsupportedLiteralLength = 2
+)
+
+// Decode returns the decoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire decoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Decode(dst, src []byte) ([]byte, error) {
+ dLen, s, err := decodedLen(src)
+ if err != nil {
+ return nil, err
+ }
+ if dLen <= len(dst) {
+ dst = dst[:dLen]
+ } else {
+ dst = make([]byte, dLen)
+ }
+ switch decode(dst, src[s:]) {
+ case 0:
+ return dst, nil
+ case decodeErrCodeUnsupportedLiteralLength:
+ return nil, errUnsupportedLiteralLength
+ }
+ return nil, ErrCorrupt
+}
+
+// NewReader returns a new Reader that decompresses from r, using the framing
+// format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func NewReader(r io.Reader) *Reader {
+ return &Reader{
+ r: r,
+ decoded: make([]byte, maxBlockSize),
+ buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize),
+ }
+}
+
+// Reader is an io.Reader that can read Snappy-compressed bytes.
+type Reader struct {
+ r io.Reader
+ err error
+ decoded []byte
+ buf []byte
+ // decoded[i:j] contains decoded bytes that have not yet been passed on.
+ i, j int
+ readHeader bool
+}
+
+// Reset discards any buffered data, resets all state, and switches the Snappy
+// reader to read from r. This permits reusing a Reader rather than allocating
+// a new one.
+func (r *Reader) Reset(reader io.Reader) {
+ r.r = reader
+ r.err = nil
+ r.i = 0
+ r.j = 0
+ r.readHeader = false
+}
+
+func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) {
+ if _, r.err = io.ReadFull(r.r, p); r.err != nil {
+ if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) {
+ r.err = ErrCorrupt
+ }
+ return false
+ }
+ return true
+}
+
+// Read satisfies the io.Reader interface.
+func (r *Reader) Read(p []byte) (int, error) {
+ if r.err != nil {
+ return 0, r.err
+ }
+ for {
+ if r.i < r.j {
+ n := copy(p, r.decoded[r.i:r.j])
+ r.i += n
+ return n, nil
+ }
+ if !r.readFull(r.buf[:4], true) {
+ return 0, r.err
+ }
+ chunkType := r.buf[0]
+ if !r.readHeader {
+ if chunkType != chunkTypeStreamIdentifier {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.readHeader = true
+ }
+ chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16
+ if chunkLen > len(r.buf) {
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+
+ // The chunk types are specified at
+ // https://github.com/google/snappy/blob/master/framing_format.txt
+ switch chunkType {
+ case chunkTypeCompressedData:
+ // Section 4.2. Compressed data (chunk type 0x00).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:chunkLen]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ buf = buf[checksumSize:]
+
+ n, err := DecodedLen(buf)
+ if err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if _, err := Decode(r.decoded, buf); err != nil {
+ r.err = err
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeUncompressedData:
+ // Section 4.3. Uncompressed data (chunk type 0x01).
+ if chunkLen < checksumSize {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ buf := r.buf[:checksumSize]
+ if !r.readFull(buf, false) {
+ return 0, r.err
+ }
+ checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+ // Read directly into r.decoded instead of via r.buf.
+ n := chunkLen - checksumSize
+ if n > len(r.decoded) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.decoded[:n], false) {
+ return 0, r.err
+ }
+ if crc(r.decoded[:n]) != checksum {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ r.i, r.j = 0, n
+ continue
+
+ case chunkTypeStreamIdentifier:
+ // Section 4.1. Stream identifier (chunk type 0xff).
+ if chunkLen != len(magicBody) {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ if !r.readFull(r.buf[:len(magicBody)], false) {
+ return 0, r.err
+ }
+ for i := 0; i < len(magicBody); i++ {
+ if r.buf[i] != magicBody[i] {
+ r.err = ErrCorrupt
+ return 0, r.err
+ }
+ }
+ continue
+ }
+
+ if chunkType <= 0x7f {
+ // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f).
+ r.err = ErrUnsupported
+ return 0, r.err
+ }
+ // Section 4.4 Padding (chunk type 0xfe).
+ // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd).
+ if !r.readFull(r.buf[:chunkLen], false) {
+ return 0, r.err
+ }
+ }
+}
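
decode.go defines both halves of the package's read path: the block-level DecodedLen/Decode functions and the framed Reader that checks chunk types and CRCs as it reads. A small sketch of each, using Encode and NewBufferedWriter from this package's encode.go to produce input (the payload is arbitrary):

package main

import (
    "bytes"
    "fmt"
    "io/ioutil"

    "github.com/golang/snappy"
)

func main() {
    src := []byte("hello, snappy, hello, snappy")

    // Block format: DecodedLen sizes the destination, Decode fills it.
    block := snappy.Encode(nil, src)
    n, _ := snappy.DecodedLen(block)
    out, err := snappy.Decode(make([]byte, n), block)
    fmt.Println(string(out), err)

    // Framing format: the Reader validates stream identifier, chunk types
    // and checksums as it decompresses.
    var buf bytes.Buffer
    w := snappy.NewBufferedWriter(&buf)
    w.Write(src)
    w.Close()
    streamed, _ := ioutil.ReadAll(snappy.NewReader(&buf))
    fmt.Println(string(streamed))
}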
diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go
new file mode 100644
index 0000000..fcd192b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// decode has the same semantics as in decode_other.go.
+//
+//go:noescape
+func decode(dst, src []byte) int
diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s
new file mode 100644
index 0000000..e6179f6
--- /dev/null
+++ b/vendor/github.com/golang/snappy/decode_amd64.s
@@ -0,0 +1,490 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The asm code generally follows the pure Go code in decode_other.go, except
+// where marked with a "!!!".
+
+// func decode(dst, src []byte) int
+//
+// All local variables fit into registers. The non-zero stack size is only to
+// spill registers and push args when issuing a CALL. The register allocation:
+// - AX scratch
+// - BX scratch
+// - CX length or x
+// - DX offset
+// - SI &src[s]
+// - DI &dst[d]
+// + R8 dst_base
+// + R9 dst_len
+// + R10 dst_base + dst_len
+// + R11 src_base
+// + R12 src_len
+// + R13 src_base + src_len
+// - R14 used by doCopy
+// - R15 used by doCopy
+//
+// The registers R8-R13 (marked with a "+") are set at the start of the
+// function, and after a CALL returns, and are not otherwise modified.
+//
+// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI.
+// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI.
+TEXT ·decode(SB), NOSPLIT, $48-56
+ // Initialize SI, DI and R8-R13.
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, DI
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, SI
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+loop:
+ // for s < len(src)
+ CMPQ SI, R13
+ JEQ end
+
+ // CX = uint32(src[s])
+ //
+ // switch src[s] & 0x03
+ MOVBLZX (SI), CX
+ MOVL CX, BX
+ ANDL $3, BX
+ CMPL BX, $1
+ JAE tagCopy
+
+ // ----------------------------------------
+ // The code below handles literal tags.
+
+ // case tagLiteral:
+ // x := uint32(src[s] >> 2)
+ // switch
+ SHRL $2, CX
+ CMPL CX, $60
+ JAE tagLit60Plus
+
+ // case x < 60:
+ // s++
+ INCQ SI
+
+doLit:
+ // This is the end of the inner "switch", when we have a literal tag.
+ //
+ // We assume that CX == x and x fits in a uint32, where x is the variable
+ // used in the pure Go decode_other.go code.
+
+ // length = int(x) + 1
+ //
+ // Unlike the pure Go code, we don't need to check if length <= 0 because
+ // CX can hold 64 bits, so the increment cannot overflow.
+ INCQ CX
+
+ // Prepare to check if copying length bytes will run past the end of dst or
+ // src.
+ //
+ // AX = len(dst) - d
+ // BX = len(src) - s
+ MOVQ R10, AX
+ SUBQ DI, AX
+ MOVQ R13, BX
+ SUBQ SI, BX
+
+ // !!! Try a faster technique for short (16 or fewer bytes) copies.
+ //
+ // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 {
+ // goto callMemmove // Fall back on calling runtime·memmove.
+ // }
+ //
+ // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s
+ // against 21 instead of 16, because it cannot assume that all of its input
+ // is contiguous in memory and so it needs to leave enough source bytes to
+ // read the next tag without refilling buffers, but Go's Decode assumes
+ // contiguousness (the src argument is a []byte).
+ CMPQ CX, $16
+ JGT callMemmove
+ CMPQ AX, $16
+ JLT callMemmove
+ CMPQ BX, $16
+ JLT callMemmove
+
+ // !!! Implement the copy from src to dst as a 16-byte load and store.
+ // (Decode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only length bytes, but that's
+ // OK. If the input is a valid Snappy encoding then subsequent iterations
+ // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a
+ // non-nil error), so the overrun will be ignored.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(SI), X0
+ MOVOU X0, 0(DI)
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+callMemmove:
+ // if length > len(dst)-d || length > len(src)-s { etc }
+ CMPQ CX, AX
+ JGT errCorrupt
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // copy(dst[d:], src[s:s+length])
+ //
+ // This means calling runtime·memmove(&dst[d], &src[s], length), so we push
+ // DI, SI and CX as arguments. Coincidentally, we also need to spill those
+ // three registers to the stack, to save local variables across the CALL.
+ MOVQ DI, 0(SP)
+ MOVQ SI, 8(SP)
+ MOVQ CX, 16(SP)
+ MOVQ DI, 24(SP)
+ MOVQ SI, 32(SP)
+ MOVQ CX, 40(SP)
+ CALL runtime·memmove(SB)
+
+ // Restore local variables: unspill registers from the stack and
+ // re-calculate R8-R13.
+ MOVQ 24(SP), DI
+ MOVQ 32(SP), SI
+ MOVQ 40(SP), CX
+ MOVQ dst_base+0(FP), R8
+ MOVQ dst_len+8(FP), R9
+ MOVQ R8, R10
+ ADDQ R9, R10
+ MOVQ src_base+24(FP), R11
+ MOVQ src_len+32(FP), R12
+ MOVQ R11, R13
+ ADDQ R12, R13
+
+ // d += length
+ // s += length
+ ADDQ CX, DI
+ ADDQ CX, SI
+ JMP loop
+
+tagLit60Plus:
+ // !!! This fragment does the
+ //
+ // s += x - 58; if uint(s) > uint(len(src)) { etc }
+ //
+ // checks. In the asm version, we code it once instead of once per switch case.
+ ADDQ CX, SI
+ SUBQ $58, SI
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // case x == 60:
+ CMPL CX, $61
+ JEQ tagLit61
+ JA tagLit62Plus
+
+ // x = uint32(src[s-1])
+ MOVBLZX -1(SI), CX
+ JMP doLit
+
+tagLit61:
+ // case x == 61:
+ // x = uint32(src[s-2]) | uint32(src[s-1])<<8
+ MOVWLZX -2(SI), CX
+ JMP doLit
+
+tagLit62Plus:
+ CMPL CX, $62
+ JA tagLit63
+
+ // case x == 62:
+ // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16
+ MOVWLZX -3(SI), CX
+ MOVBLZX -1(SI), BX
+ SHLL $16, BX
+ ORL BX, CX
+ JMP doLit
+
+tagLit63:
+ // case x == 63:
+ // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24
+ MOVL -4(SI), CX
+ JMP doLit
+
+// The code above handles literal tags.
+// ----------------------------------------
+// The code below handles copy tags.
+
+tagCopy4:
+ // case tagCopy4:
+ // s += 5
+ ADDQ $5, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-5])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24)
+ MOVLQZX -4(SI), DX
+ JMP doCopy
+
+tagCopy2:
+ // case tagCopy2:
+ // s += 3
+ ADDQ $3, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // length = 1 + int(src[s-3])>>2
+ SHRQ $2, CX
+ INCQ CX
+
+ // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8)
+ MOVWQZX -2(SI), DX
+ JMP doCopy
+
+tagCopy:
+ // We have a copy tag. We assume that:
+ // - BX == src[s] & 0x03
+ // - CX == src[s]
+ CMPQ BX, $2
+ JEQ tagCopy2
+ JA tagCopy4
+
+ // case tagCopy1:
+ // s += 2
+ ADDQ $2, SI
+
+ // if uint(s) > uint(len(src)) { etc }
+ MOVQ SI, BX
+ SUBQ R11, BX
+ CMPQ BX, R12
+ JA errCorrupt
+
+ // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1]))
+ MOVQ CX, DX
+ ANDQ $0xe0, DX
+ SHLQ $3, DX
+ MOVBQZX -1(SI), BX
+ ORQ BX, DX
+
+ // length = 4 + int(src[s-2])>>2&0x7
+ SHRQ $2, CX
+ ANDQ $7, CX
+ ADDQ $4, CX
+
+doCopy:
+ // This is the end of the outer "switch", when we have a copy tag.
+ //
+ // We assume that:
+ // - CX == length && CX > 0
+ // - DX == offset
+
+ // if offset <= 0 { etc }
+ CMPQ DX, $0
+ JLE errCorrupt
+
+ // if d < offset { etc }
+ MOVQ DI, BX
+ SUBQ R8, BX
+ CMPQ BX, DX
+ JLT errCorrupt
+
+ // if length > len(dst)-d { etc }
+ MOVQ R10, BX
+ SUBQ DI, BX
+ CMPQ CX, BX
+ JGT errCorrupt
+
+ // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+ //
+ // Set:
+ // - R14 = len(dst)-d
+ // - R15 = &dst[d-offset]
+ MOVQ R10, R14
+ SUBQ DI, R14
+ MOVQ DI, R15
+ SUBQ DX, R15
+
+ // !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+ //
+ // First, try using two 8-byte load/stores, similar to the doLit technique
+ // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+ // still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+ // and not one 16-byte load/store, and the first store has to be before the
+ // second load, due to the overlap if offset is in the range [8, 16).
+ //
+ // if length > 16 || offset < 8 || len(dst)-d < 16 {
+ // goto slowForwardCopy
+ // }
+ // copy 16 bytes
+ // d += length
+ CMPQ CX, $16
+ JGT slowForwardCopy
+ CMPQ DX, $8
+ JLT slowForwardCopy
+ CMPQ R14, $16
+ JLT slowForwardCopy
+ MOVQ 0(R15), AX
+ MOVQ AX, 0(DI)
+ MOVQ 8(R15), BX
+ MOVQ BX, 8(DI)
+ ADDQ CX, DI
+ JMP loop
+
+slowForwardCopy:
+ // !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+ // can still try 8-byte load stores, provided we can overrun up to 10 extra
+ // bytes. As above, the overrun will be fixed up by subsequent iterations
+ // of the outermost loop.
+ //
+ // The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+ // commentary says:
+ //
+ // ----
+ //
+ // The main part of this loop is a simple copy of eight bytes at a time
+ // until we've copied (at least) the requested amount of bytes. However,
+ // if d and d-offset are less than eight bytes apart (indicating a
+ // repeating pattern of length < 8), we first need to expand the pattern in
+ // order to get the correct results. For instance, if the buffer looks like
+ // this, with the eight-byte <d-offset> and <d> patterns marked as
+ // intervals:
+ //
+ // abxxxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // a single eight-byte copy from <d-offset> to <d> will repeat the pattern
+ // once, after which we can move <d> two bytes without moving <d-offset>:
+ //
+ // ababxxxxxxxxxx
+ // [------] d-offset
+ // [------] d
+ //
+ // and repeat the exercise until the two no longer overlap.
+ //
+ // This allows us to do very well in the special case of one single byte
+ // repeated many times, without taking a big hit for more general cases.
+ //
+ // The worst case of extra writing past the end of the match occurs when
+ // offset == 1 and length == 1; the last copy will read from byte positions
+ // [0..7] and write to [4..11], whereas it was only supposed to write to
+ // position 1. Thus, ten excess bytes.
+ //
+ // ----
+ //
+ // That "10 byte overrun" worst case is confirmed by Go's
+ // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+ // and finishSlowForwardCopy algorithm.
+ //
+ // if length > len(dst)-d-10 {
+ // goto verySlowForwardCopy
+ // }
+ SUBQ $10, R14
+ CMPQ CX, R14
+ JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+ // !!! As above, expand the pattern so that offset >= 8 and we can use
+ // 8-byte load/stores.
+ //
+ // for offset < 8 {
+ // copy 8 bytes from dst[d-offset:] to dst[d:]
+ // length -= offset
+ // d += offset
+ // offset += offset
+ // // The two previous lines together means that d-offset, and therefore
+ // // R15, is unchanged.
+ // }
+ CMPQ DX, $8
+ JGE fixUpSlowForwardCopy
+ MOVQ (R15), BX
+ MOVQ BX, (DI)
+ SUBQ DX, CX
+ ADDQ DX, DI
+ ADDQ DX, DX
+ JMP makeOffsetAtLeast8
+
+fixUpSlowForwardCopy:
+ // !!! Add length (which might be negative now) to d (implied by DI being
+ // &dst[d]) so that d ends up at the right place when we jump back to the
+ // top of the loop. Before we do that, though, we save DI to AX so that, if
+ // length is positive, copying the remaining length bytes will write to the
+ // right place.
+ MOVQ DI, AX
+ ADDQ CX, DI
+
+finishSlowForwardCopy:
+ // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative
+ // length means that we overrun, but as above, that will be fixed up by
+ // subsequent iterations of the outermost loop.
+ CMPQ CX, $0
+ JLE loop
+ MOVQ (R15), BX
+ MOVQ BX, (AX)
+ ADDQ $8, R15
+ ADDQ $8, AX
+ SUBQ $8, CX
+ JMP finishSlowForwardCopy
+
+verySlowForwardCopy:
+ // verySlowForwardCopy is a simple implementation of forward copy. In C
+ // parlance, this is a do/while loop instead of a while loop, since we know
+ // that length > 0. In Go syntax:
+ //
+ // for {
+ // dst[d] = dst[d - offset]
+ // d++
+ // length--
+ // if length == 0 {
+ // break
+ // }
+ // }
+ MOVB (R15), BX
+ MOVB BX, (DI)
+ INCQ R15
+ INCQ DI
+ DECQ CX
+ JNZ verySlowForwardCopy
+ JMP loop
+
+// The code above handles copy tags.
+// ----------------------------------------
+
+end:
+ // This is the end of the "for s < len(src)".
+ //
+ // if d != len(dst) { etc }
+ CMPQ DI, R10
+ JNE errCorrupt
+
+ // return 0
+ MOVQ $0, ret+48(FP)
+ RET
+
+errCorrupt:
+ // return decodeErrCodeCorrupt
+ MOVQ $1, ret+48(FP)
+ RET
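
The slowForwardCopy commentary above is the subtle part of this file: when offset is smaller than length, the source and destination ranges overlap, and earlier writes deliberately feed later reads so a short seed expands into a repeated pattern. The behaviour the assembly must preserve is the verySlowForwardCopy pseudocode quoted in the comments, restated here as a standalone Go sketch (not part of the package):

// forwardCopy copies length bytes within dst from position d-offset to d.
// When offset < length the ranges overlap, and the byte-by-byte order makes
// earlier writes visible to later reads, repeating the pattern.
func forwardCopy(dst []byte, d, offset, length int) {
    for ; length > 0; length-- {
        dst[d] = dst[d-offset]
        d++
    }
}

// Example: forwardCopy([]byte("ab______"), 2, 2, 6) leaves "abababab".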
diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go
new file mode 100644
index 0000000..8d393e9
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode.go
@@ -0,0 +1,285 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package snappy
+
+import (
+ "encoding/binary"
+ "errors"
+ "io"
+)
+
+// Encode returns the encoded form of src. The returned slice may be a sub-
+// slice of dst if dst was large enough to hold the entire encoded block.
+// Otherwise, a newly allocated slice will be returned.
+//
+// The dst and src must not overlap. It is valid to pass a nil dst.
+func Encode(dst, src []byte) []byte {
+ if n := MaxEncodedLen(len(src)); n < 0 {
+ panic(ErrTooLarge)
+ } else if len(dst) < n {
+ dst = make([]byte, n)
+ }
+
+ // The block starts with the varint-encoded length of the decompressed bytes.
+ d := binary.PutUvarint(dst, uint64(len(src)))
+
+ for len(src) > 0 {
+ p := src
+ src = nil
+ if len(p) > maxBlockSize {
+ p, src = p[:maxBlockSize], p[maxBlockSize:]
+ }
+ if len(p) < minNonLiteralBlockSize {
+ d += emitLiteral(dst[d:], p)
+ } else {
+ d += encodeBlock(dst[d:], p)
+ }
+ }
+ return dst[:d]
+}
+
+// inputMargin is the minimum number of extra input bytes to keep, inside
+// encodeBlock's inner loop. On some architectures, this margin lets us
+// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
+// literals can be implemented as a single load to and store from a 16-byte
+// register. That literal's actual length can be as short as 1 byte, so this
+// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
+// the encoding loop will fix up the copy overrun, and this inputMargin ensures
+// that we don't overrun the dst and src buffers.
+const inputMargin = 16 - 1
+
+// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
+// could be encoded with a copy tag. This is the minimum with respect to the
+// algorithm used by encodeBlock, not a minimum enforced by the file format.
+//
+// The encoded output must start with at least a 1 byte literal, as there are
+// no previous bytes to copy. A minimal (1 byte) copy after that, generated
+// from an emitCopy call in encodeBlock's main loop, would require at least
+// another inputMargin bytes, for the reason above: we want any emitLiteral
+// calls inside encodeBlock's main loop to use the fast path if possible, which
+// requires being able to overrun by inputMargin bytes. Thus,
+// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
+//
+// The C++ code doesn't use this exact threshold, but it could, as discussed at
+// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
+// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
+// optimization. It should not affect the encoded form. This is tested by
+// TestSameEncodingAsCppShortCopies.
+const minNonLiteralBlockSize = 1 + 1 + inputMargin
+
+// MaxEncodedLen returns the maximum length of a snappy block, given its
+// uncompressed length.
+//
+// It will return a negative value if srcLen is too large to encode.
+func MaxEncodedLen(srcLen int) int {
+ n := uint64(srcLen)
+ if n > 0xffffffff {
+ return -1
+ }
+ // Compressed data can be defined as:
+ // compressed := item* literal*
+ // item := literal* copy
+ //
+ // The trailing literal sequence has a space blowup of at most 62/60
+ // since a literal of length 60 needs one tag byte + one extra byte
+ // for length information.
+ //
+ // Item blowup is trickier to measure. Suppose the "copy" op copies
+ // 4 bytes of data. Because of a special check in the encoding code,
+ // we produce a 4-byte copy only if the offset is < 65536. Therefore
+ // the copy op takes 3 bytes to encode, and this type of item leads
+ // to at most the 62/60 blowup for representing literals.
+ //
+ // Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ // enough, it will take 5 bytes to encode the copy op. Therefore the
+ // worst case here is a one-byte literal followed by a five-byte copy.
+ // That is, 6 bytes of input turn into 7 bytes of "compressed" data.
+ //
+ // This last factor dominates the blowup, so the final estimate is:
+ n = 32 + n + n/6
+ if n > 0xffffffff {
+ return -1
+ }
+ return int(n)
+}
+
+var errClosed = errors.New("snappy: Writer is closed")
+
+// NewWriter returns a new Writer that compresses to w.
+//
+// The Writer returned does not buffer writes. There is no need to Flush or
+// Close such a Writer.
+//
+// Deprecated: the Writer returned is not suitable for many small writes, only
+// for few large writes. Use NewBufferedWriter instead, which is efficient
+// regardless of the frequency and shape of the writes, and remember to Close
+// that Writer when done.
+func NewWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// NewBufferedWriter returns a new Writer that compresses to w, using the
+// framing format described at
+// https://github.com/google/snappy/blob/master/framing_format.txt
+//
+// The Writer returned buffers writes. Users must call Close to guarantee all
+// data has been forwarded to the underlying io.Writer. They may also call
+// Flush zero or more times before calling Close.
+func NewBufferedWriter(w io.Writer) *Writer {
+ return &Writer{
+ w: w,
+ ibuf: make([]byte, 0, maxBlockSize),
+ obuf: make([]byte, obufLen),
+ }
+}
+
+// Writer is an io.Writer that can write Snappy-compressed bytes.
+type Writer struct {
+ w io.Writer
+ err error
+
+ // ibuf is a buffer for the incoming (uncompressed) bytes.
+ //
+ // Its use is optional. For backwards compatibility, Writers created by the
+ // NewWriter function have ibuf == nil, do not buffer incoming bytes, and
+ // therefore do not need to be Flush'ed or Close'd.
+ ibuf []byte
+
+ // obuf is a buffer for the outgoing (compressed) bytes.
+ obuf []byte
+
+ // wroteStreamHeader is whether we have written the stream header.
+ wroteStreamHeader bool
+}
+
+// Reset discards the writer's state and switches the Snappy writer to write to
+// w. This permits reusing a Writer rather than allocating a new one.
+func (w *Writer) Reset(writer io.Writer) {
+ w.w = writer
+ w.err = nil
+ if w.ibuf != nil {
+ w.ibuf = w.ibuf[:0]
+ }
+ w.wroteStreamHeader = false
+}
+
+// Write satisfies the io.Writer interface.
+func (w *Writer) Write(p []byte) (nRet int, errRet error) {
+ if w.ibuf == nil {
+ // Do not buffer incoming bytes. This does not perform or compress well
+ // if the caller of Writer.Write writes many small slices. This
+ // behavior is therefore deprecated, but still supported for backwards
+ // compatibility with code that doesn't explicitly Flush or Close.
+ return w.write(p)
+ }
+
+ // The remainder of this method is based on bufio.Writer.Write from the
+ // standard library.
+
+ for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
+ var n int
+ if len(w.ibuf) == 0 {
+ // Large write, empty buffer.
+ // Write directly from p to avoid copy.
+ n, _ = w.write(p)
+ } else {
+ n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ w.Flush()
+ }
+ nRet += n
+ p = p[n:]
+ }
+ if w.err != nil {
+ return nRet, w.err
+ }
+ n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
+ w.ibuf = w.ibuf[:len(w.ibuf)+n]
+ nRet += n
+ return nRet, nil
+}
+
+func (w *Writer) write(p []byte) (nRet int, errRet error) {
+ if w.err != nil {
+ return 0, w.err
+ }
+ for len(p) > 0 {
+ obufStart := len(magicChunk)
+ if !w.wroteStreamHeader {
+ w.wroteStreamHeader = true
+ copy(w.obuf, magicChunk)
+ obufStart = 0
+ }
+
+ var uncompressed []byte
+ if len(p) > maxBlockSize {
+ uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
+ } else {
+ uncompressed, p = p, nil
+ }
+ checksum := crc(uncompressed)
+
+ // Compress the buffer, discarding the result if the improvement
+ // isn't at least 12.5%.
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
+ chunkType := uint8(chunkTypeCompressedData)
+ chunkLen := 4 + len(compressed)
+ obufEnd := obufHeaderLen + len(compressed)
+ if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
+ chunkType = chunkTypeUncompressedData
+ chunkLen = 4 + len(uncompressed)
+ obufEnd = obufHeaderLen
+ }
+
+ // Fill in the per-chunk header that comes before the body.
+ w.obuf[len(magicChunk)+0] = chunkType
+ w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
+ w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
+ w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
+ w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
+ w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
+ w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
+ w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
+
+ if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ if chunkType == chunkTypeUncompressedData {
+ if _, err := w.w.Write(uncompressed); err != nil {
+ w.err = err
+ return nRet, err
+ }
+ }
+ nRet += len(uncompressed)
+ }
+ return nRet, nil
+}
+
+// Flush flushes the Writer to its underlying io.Writer.
+func (w *Writer) Flush() error {
+ if w.err != nil {
+ return w.err
+ }
+ if len(w.ibuf) == 0 {
+ return nil
+ }
+ w.write(w.ibuf)
+ w.ibuf = w.ibuf[:0]
+ return w.err
+}
+
+// Close calls Flush and then closes the Writer.
+func (w *Writer) Close() error {
+ w.Flush()
+ ret := w.err
+ if w.err == nil {
+ w.err = errClosed
+ }
+ return ret
+}
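
A hedged usage sketch of the Writer API added above: NewBufferedWriter buffers input, so Close (or Flush) is required before the compressed stream is complete. Standalone example, assuming the vendored import path:

    package main

    import (
        "bytes"
        "log"

        "github.com/golang/snappy"
    )

    func main() {
        var buf bytes.Buffer

        // Buffered writer: data may sit in ibuf until Flush or Close.
        w := snappy.NewBufferedWriter(&buf)
        if _, err := w.Write([]byte("hello, snappy framing format")); err != nil {
            log.Fatal(err)
        }
        if err := w.Close(); err != nil {
            log.Fatal(err)
        }

        // buf now holds the stream identifier chunk followed by one data chunk.
        log.Printf("%d bytes framed", buf.Len())
    }
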
diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go
new file mode 100644
index 0000000..150d91b
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.go
@@ -0,0 +1,29 @@
+// Copyright 2016 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+package snappy
+
+// emitLiteral has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitLiteral(dst, lit []byte) int
+
+// emitCopy has the same semantics as in encode_other.go.
+//
+//go:noescape
+func emitCopy(dst []byte, offset, length int) int
+
+// extendMatch has the same semantics as in encode_other.go.
+//
+//go:noescape
+func extendMatch(src []byte, i, j int) int
+
+// encodeBlock has the same semantics as in encode_other.go.
+//
+//go:noescape
+func encodeBlock(dst, src []byte) (d int)
diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s
new file mode 100644
index 0000000..adfd979
--- /dev/null
+++ b/vendor/github.com/golang/snappy/encode_amd64.s
@@ -0,0 +1,730 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+// +build gc
+// +build !noasm
+
+#include "textflag.h"
+
+// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a
+// Go toolchain regression. See https://github.com/golang/go/issues/15426 and
+// https://github.com/golang/snappy/issues/29
+//
+// As a workaround, the package was built with a known good assembler, and
+// those instructions were disassembled by "objdump -d" to yield the
+// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+// style comments, in AT&T asm syntax. Note that rsp here is a physical
+// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm).
+// The instructions were then encoded as "BYTE $0x.." sequences, which assemble
+// fine on Go 1.6.
+
+// The asm code generally follows the pure Go code in encode_other.go, except
+// where marked with a "!!!".
+
+// ----------------------------------------------------------------------------
+
+// func emitLiteral(dst, lit []byte) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX len(lit)
+// - BX n
+// - DX return value
+// - DI &dst[i]
+// - R10 &lit[0]
+//
+// The 24 bytes of stack space is to call runtime·memmove.
+//
+// The unusual register allocation of local variables, such as R10 for the
+// source pointer, matches the allocation used at the call site in encodeBlock,
+// which makes it easier to manually inline this function.
+TEXT ·emitLiteral(SB), NOSPLIT, $24-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ lit_base+24(FP), R10
+ MOVQ lit_len+32(FP), AX
+ MOVQ AX, DX
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT oneByte
+ CMPL BX, $256
+ JLT twoBytes
+
+threeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ ADDQ $3, DX
+ JMP memmove
+
+twoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ ADDQ $2, DX
+ JMP memmove
+
+oneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+ ADDQ $1, DX
+
+memmove:
+ MOVQ DX, ret+48(FP)
+
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ CALL runtime·memmove(SB)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func emitCopy(dst []byte, offset, length int) int
+//
+// All local variables fit into registers. The register allocation:
+// - AX length
+// - SI &dst[0]
+// - DI &dst[i]
+// - R11 offset
+//
+// The unusual register allocation of local variables, such as R11 for the
+// offset, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·emitCopy(SB), NOSPLIT, $0-48
+ MOVQ dst_base+0(FP), DI
+ MOVQ DI, SI
+ MOVQ offset+24(FP), R11
+ MOVQ length+32(FP), AX
+
+loop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT step1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP loop0
+
+step1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE step2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+step2:
+ // if length >= 12 || offset >= 2048 { goto step3 }
+ CMPL AX, $12
+ JGE step3
+ CMPL R11, $2048
+ JGE step3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+step3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+ // Return the number of bytes written.
+ SUBQ SI, DI
+ MOVQ DI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func extendMatch(src []byte, i, j int) int
+//
+// All local variables fit into registers. The register allocation:
+// - DX &src[0]
+// - SI &src[j]
+// - R13 &src[len(src) - 8]
+// - R14 &src[len(src)]
+// - R15 &src[i]
+//
+// The unusual register allocation of local variables, such as R15 for a source
+// pointer, matches the allocation used at the call site in encodeBlock, which
+// makes it easier to manually inline this function.
+TEXT ·extendMatch(SB), NOSPLIT, $0-48
+ MOVQ src_base+0(FP), DX
+ MOVQ src_len+8(FP), R14
+ MOVQ i+24(FP), R15
+ MOVQ j+32(FP), SI
+ ADDQ DX, R14
+ ADDQ DX, R15
+ ADDQ DX, SI
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+cmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA cmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE bsf
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP cmp8
+
+bsf:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+cmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE extendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE extendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP cmp1
+
+extendMatchEnd:
+ // Convert from &src[ret] to ret.
+ SUBQ DX, SI
+ MOVQ SI, ret+40(FP)
+ RET
+
+// ----------------------------------------------------------------------------
+
+// func encodeBlock(dst, src []byte) (d int)
+//
+// All local variables fit into registers, other than "var table". The register
+// allocation:
+// - AX . .
+// - BX . .
+// - CX 56 shift (note that amd64 shifts by non-immediates must use CX).
+// - DX 64 &src[0], tableSize
+// - SI 72 &src[s]
+// - DI 80 &dst[d]
+// - R9 88 sLimit
+// - R10 . &src[nextEmit]
+// - R11 96 prevHash, currHash, nextHash, offset
+// - R12 104 &src[base], skip
+// - R13 . &src[nextS], &src[len(src) - 8]
+// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x
+// - R15 112 candidate
+//
+// The second column (56, 64, etc) is the stack offset to spill the registers
+// when calling other functions. We could pack this slightly tighter, but it's
+// simpler to have a dedicated spill map independent of the function called.
+//
+// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An
+// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill
+// local variables (registers) during calls gives 32768 + 56 + 64 = 32888.
+TEXT ·encodeBlock(SB), 0, $32888-56
+ MOVQ dst_base+0(FP), DI
+ MOVQ src_base+24(FP), SI
+ MOVQ src_len+32(FP), R14
+
+ // shift, tableSize := uint32(32-8), 1<<8
+ MOVQ $24, CX
+ MOVQ $256, DX
+
+calcShift:
+ // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+ // shift--
+ // }
+ CMPQ DX, $16384
+ JGE varTable
+ CMPQ DX, R14
+ JGE varTable
+ SUBQ $1, CX
+ SHLQ $1, DX
+ JMP calcShift
+
+varTable:
+ // var table [maxTableSize]uint16
+ //
+ // In the asm code, unlike the Go code, we can zero-initialize only the
+ // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU
+ // writes 16 bytes, so we can do only tableSize/8 writes instead of the
+ // 2048 writes that would zero-initialize all of table's 32768 bytes.
+ SHRQ $3, DX
+ LEAQ table-32768(SP), BX
+ PXOR X0, X0
+
+memclr:
+ MOVOU X0, 0(BX)
+ ADDQ $16, BX
+ SUBQ $1, DX
+ JNZ memclr
+
+ // !!! DX = &src[0]
+ MOVQ SI, DX
+
+ // sLimit := len(src) - inputMargin
+ MOVQ R14, R9
+ SUBQ $15, R9
+
+ // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't
+ // change for the rest of the function.
+ MOVQ CX, 56(SP)
+ MOVQ DX, 64(SP)
+ MOVQ R9, 88(SP)
+
+ // nextEmit := 0
+ MOVQ DX, R10
+
+ // s := 1
+ ADDQ $1, SI
+
+ // nextHash := hash(load32(src, s), shift)
+ MOVL 0(SI), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+outer:
+ // for { etc }
+
+ // skip := 32
+ MOVQ $32, R12
+
+ // nextS := s
+ MOVQ SI, R13
+
+ // candidate := 0
+ MOVQ $0, R15
+
+inner0:
+ // for { etc }
+
+ // s := nextS
+ MOVQ R13, SI
+
+ // bytesBetweenHashLookups := skip >> 5
+ MOVQ R12, R14
+ SHRQ $5, R14
+
+ // nextS = s + bytesBetweenHashLookups
+ ADDQ R14, R13
+
+ // skip += bytesBetweenHashLookups
+ ADDQ R14, R12
+
+ // if nextS > sLimit { goto emitRemainder }
+ MOVQ R13, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JA emitRemainder
+
+ // candidate = int(table[nextHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[nextHash] = uint16(s)
+ MOVQ SI, AX
+ SUBQ DX, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // nextHash = hash(load32(src, nextS), shift)
+ MOVL 0(R13), R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // if load32(src, s) != load32(src, candidate) { continue } break
+ MOVL 0(SI), AX
+ MOVL (DX)(R15*1), BX
+ CMPL AX, BX
+ JNE inner0
+
+fourByteMatch:
+ // As per the encode_other.go code:
+ //
+ // A 4-byte match has been found. We'll later see etc.
+
+ // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment
+ // on inputMargin in encode.go.
+ MOVQ SI, AX
+ SUBQ R10, AX
+ CMPQ AX, $16
+ JLE emitLiteralFastPath
+
+ // ----------------------------------------
+ // Begin inline of the emitLiteral call.
+ //
+ // d += emitLiteral(dst[d:], src[nextEmit:s])
+
+ MOVL AX, BX
+ SUBL $1, BX
+
+ CMPL BX, $60
+ JLT inlineEmitLiteralOneByte
+ CMPL BX, $256
+ JLT inlineEmitLiteralTwoBytes
+
+inlineEmitLiteralThreeBytes:
+ MOVB $0xf4, 0(DI)
+ MOVW BX, 1(DI)
+ ADDQ $3, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralTwoBytes:
+ MOVB $0xf0, 0(DI)
+ MOVB BX, 1(DI)
+ ADDQ $2, DI
+ JMP inlineEmitLiteralMemmove
+
+inlineEmitLiteralOneByte:
+ SHLB $2, BX
+ MOVB BX, 0(DI)
+ ADDQ $1, DI
+
+inlineEmitLiteralMemmove:
+ // Spill local variables (registers) onto the stack; call; unspill.
+ //
+ // copy(dst[i:], lit)
+ //
+ // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push
+ // DI, R10 and AX as arguments.
+ MOVQ DI, 0(SP)
+ MOVQ R10, 8(SP)
+ MOVQ AX, 16(SP)
+ ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)".
+ MOVQ SI, 72(SP)
+ MOVQ DI, 80(SP)
+ MOVQ R15, 112(SP)
+ CALL runtime·memmove(SB)
+ MOVQ 56(SP), CX
+ MOVQ 64(SP), DX
+ MOVQ 72(SP), SI
+ MOVQ 80(SP), DI
+ MOVQ 88(SP), R9
+ MOVQ 112(SP), R15
+ JMP inner1
+
+inlineEmitLiteralEnd:
+ // End inline of the emitLiteral call.
+ // ----------------------------------------
+
+emitLiteralFastPath:
+ // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2".
+ MOVB AX, BX
+ SUBB $1, BX
+ SHLB $2, BX
+ MOVB BX, (DI)
+ ADDQ $1, DI
+
+ // !!! Implement the copy from lit to dst as a 16-byte load and store.
+ // (Encode's documentation says that dst and src must not overlap.)
+ //
+ // This always copies 16 bytes, instead of only len(lit) bytes, but that's
+ // OK. Subsequent iterations will fix up the overrun.
+ //
+ // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or
+ // 16-byte loads and stores. This technique probably wouldn't be as
+ // effective on architectures that are fussier about alignment.
+ MOVOU 0(R10), X0
+ MOVOU X0, 0(DI)
+ ADDQ AX, DI
+
+inner1:
+ // for { etc }
+
+ // base := s
+ MOVQ SI, R12
+
+ // !!! offset := base - candidate
+ MOVQ R12, R11
+ SUBQ R15, R11
+ SUBQ DX, R11
+
+ // ----------------------------------------
+ // Begin inline of the extendMatch call.
+ //
+ // s = extendMatch(src, candidate+4, s+4)
+
+ // !!! R14 = &src[len(src)]
+ MOVQ src_len+32(FP), R14
+ ADDQ DX, R14
+
+ // !!! R13 = &src[len(src) - 8]
+ MOVQ R14, R13
+ SUBQ $8, R13
+
+ // !!! R15 = &src[candidate + 4]
+ ADDQ $4, R15
+ ADDQ DX, R15
+
+ // !!! s += 4
+ ADDQ $4, SI
+
+inlineExtendMatchCmp8:
+ // As long as we are 8 or more bytes before the end of src, we can load and
+ // compare 8 bytes at a time. If those 8 bytes are equal, repeat.
+ CMPQ SI, R13
+ JA inlineExtendMatchCmp1
+ MOVQ (R15), AX
+ MOVQ (SI), BX
+ CMPQ AX, BX
+ JNE inlineExtendMatchBSF
+ ADDQ $8, R15
+ ADDQ $8, SI
+ JMP inlineExtendMatchCmp8
+
+inlineExtendMatchBSF:
+ // If those 8 bytes were not equal, XOR the two 8 byte values, and return
+ // the index of the first byte that differs. The BSF instruction finds the
+ // least significant 1 bit, the amd64 architecture is little-endian, and
+ // the shift by 3 converts a bit index to a byte index.
+ XORQ AX, BX
+ BSFQ BX, BX
+ SHRQ $3, BX
+ ADDQ BX, SI
+ JMP inlineExtendMatchEnd
+
+inlineExtendMatchCmp1:
+ // In src's tail, compare 1 byte at a time.
+ CMPQ SI, R14
+ JAE inlineExtendMatchEnd
+ MOVB (R15), AX
+ MOVB (SI), BX
+ CMPB AX, BX
+ JNE inlineExtendMatchEnd
+ ADDQ $1, R15
+ ADDQ $1, SI
+ JMP inlineExtendMatchCmp1
+
+inlineExtendMatchEnd:
+ // End inline of the extendMatch call.
+ // ----------------------------------------
+
+ // ----------------------------------------
+ // Begin inline of the emitCopy call.
+ //
+ // d += emitCopy(dst[d:], base-candidate, s-base)
+
+ // !!! length := s - base
+ MOVQ SI, AX
+ SUBQ R12, AX
+
+inlineEmitCopyLoop0:
+ // for length >= 68 { etc }
+ CMPL AX, $68
+ JLT inlineEmitCopyStep1
+
+ // Emit a length 64 copy, encoded as 3 bytes.
+ MOVB $0xfe, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $64, AX
+ JMP inlineEmitCopyLoop0
+
+inlineEmitCopyStep1:
+ // if length > 64 { etc }
+ CMPL AX, $64
+ JLE inlineEmitCopyStep2
+
+ // Emit a length 60 copy, encoded as 3 bytes.
+ MOVB $0xee, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+ SUBL $60, AX
+
+inlineEmitCopyStep2:
+ // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 }
+ CMPL AX, $12
+ JGE inlineEmitCopyStep3
+ CMPL R11, $2048
+ JGE inlineEmitCopyStep3
+
+ // Emit the remaining copy, encoded as 2 bytes.
+ MOVB R11, 1(DI)
+ SHRL $8, R11
+ SHLB $5, R11
+ SUBB $4, AX
+ SHLB $2, AX
+ ORB AX, R11
+ ORB $1, R11
+ MOVB R11, 0(DI)
+ ADDQ $2, DI
+ JMP inlineEmitCopyEnd
+
+inlineEmitCopyStep3:
+ // Emit the remaining copy, encoded as 3 bytes.
+ SUBL $1, AX
+ SHLB $2, AX
+ ORB $2, AX
+ MOVB AX, 0(DI)
+ MOVW R11, 1(DI)
+ ADDQ $3, DI
+
+inlineEmitCopyEnd:
+ // End inline of the emitCopy call.
+ // ----------------------------------------
+
+ // nextEmit = s
+ MOVQ SI, R10
+
+ // if s >= sLimit { goto emitRemainder }
+ MOVQ SI, AX
+ SUBQ DX, AX
+ CMPQ AX, R9
+ JAE emitRemainder
+
+ // As per the encode_other.go code:
+ //
+ // We could immediately etc.
+
+ // x := load64(src, s-1)
+ MOVQ -1(SI), R14
+
+ // prevHash := hash(uint32(x>>0), shift)
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // table[prevHash] = uint16(s-1)
+ MOVQ SI, AX
+ SUBQ DX, AX
+ SUBQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // currHash := hash(uint32(x>>8), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // candidate = int(table[currHash])
+ // XXX: MOVWQZX table-32768(SP)(R11*2), R15
+ // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15
+ BYTE $0x4e
+ BYTE $0x0f
+ BYTE $0xb7
+ BYTE $0x7c
+ BYTE $0x5c
+ BYTE $0x78
+
+ // table[currHash] = uint16(s)
+ ADDQ $1, AX
+
+ // XXX: MOVW AX, table-32768(SP)(R11*2)
+ // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2)
+ BYTE $0x66
+ BYTE $0x42
+ BYTE $0x89
+ BYTE $0x44
+ BYTE $0x5c
+ BYTE $0x78
+
+ // if uint32(x>>8) == load32(src, candidate) { continue }
+ MOVL (DX)(R15*1), BX
+ CMPL R14, BX
+ JEQ inner1
+
+ // nextHash = hash(uint32(x>>16), shift)
+ SHRQ $8, R14
+ MOVL R14, R11
+ IMULL $0x1e35a7bd, R11
+ SHRL CX, R11
+
+ // s++
+ ADDQ $1, SI
+
+ // break out of the inner1 for loop, i.e. continue the outer loop.
+ JMP outer
+
+emitRemainder:
+ // if nextEmit < len(src) { etc }
+ MOVQ src_len+32(FP), AX
+ ADDQ DX, AX
+ CMPQ R10, AX
+ JEQ encodeBlockEnd
+
+ // d += emitLiteral(dst[d:], src[nextEmit:])
+ //
+ // Push args.
+ MOVQ DI, 0(SP)
+ MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative.
+ MOVQ R10, 24(SP)
+ SUBQ R10, AX
+ MOVQ AX, 32(SP)
+ MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative.
+
+ // Spill local variables (registers) onto the stack; call; unspill.
+ MOVQ DI, 80(SP)
+ CALL ·emitLiteral(SB)
+ MOVQ 80(SP), DI
+
+ // Finish the "d +=" part of "d += emitLiteral(etc)".
+ ADDQ 48(SP), DI
+
+encodeBlockEnd:
+ MOVQ dst_base+0(FP), AX
+ SUBQ AX, DI
+ MOVQ DI, d+48(FP)
+ RET
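
The assembly above mirrors the pure Go encoder; a hedged Go sketch of the hash and table-sizing logic behind the IMULL/SHRL sequences and the calcShift loop (names are illustrative, not the vendored identifiers):

    package main

    import "fmt"

    // hash mirrors the IMULL $0x1e35a7bd / SHRL CX sequence: a multiplicative
    // hash of a 32-bit load, keeping only the top bits.
    func hash(u, shift uint32) uint32 {
        return (u * 0x1e35a7bd) >> shift
    }

    // tableParams mirrors the calcShift loop: start at shift=24, tableSize=256,
    // then double tableSize (capped at maxTableSize) until it covers srcLen,
    // decrementing shift on each doubling.
    func tableParams(srcLen, maxTableSize int) (shift uint32, tableSize int) {
        shift, tableSize = 32-8, 1<<8
        for ; tableSize < maxTableSize && tableSize < srcLen; tableSize *= 2 {
            shift--
        }
        return shift, tableSize
    }

    func main() {
        shift, size := tableParams(65536, 1<<14) // 1<<14 matches the $16384 cap above
        fmt.Println(shift, size, hash(0x12345678, shift))
    }
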
diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go
new file mode 100644
index 0000000..0cf5e37
--- /dev/null
+++ b/vendor/github.com/golang/snappy/snappy.go
@@ -0,0 +1,87 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package snappy implements the snappy block-based compression format.
+// It aims for very high speeds and reasonable compression.
+//
+// The C++ snappy implementation is at https://github.com/google/snappy
+package snappy // import "github.com/golang/snappy"
+
+import (
+ "hash/crc32"
+)
+
+/*
+Each encoded block begins with the varint-encoded length of the decoded data,
+followed by a sequence of chunks. Chunks begin and end on byte boundaries. The
+first byte of each chunk is broken into its 2 least and 6 most significant bits
+called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag.
+Zero means a literal tag. All other values mean a copy tag.
+
+For literal tags:
+ - If m < 60, the next 1 + m bytes are literal bytes.
+ - Otherwise, let n be the little-endian unsigned integer denoted by the next
+ m - 59 bytes. The next 1 + n bytes after that are literal bytes.
+
+For copy tags, length bytes are copied from offset bytes ago, in the style of
+Lempel-Ziv compression algorithms. In particular:
+ - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12).
+ The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10
+ of the offset. The next byte is bits 0-7 of the offset.
+ - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65).
+ The length is 1 + m. The offset is the little-endian unsigned integer
+ denoted by the next 2 bytes.
+ - For l == 3, this tag is a legacy format that is no longer issued by most
+ encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in
+ [1, 65). The length is 1 + m. The offset is the little-endian unsigned
+ integer denoted by the next 4 bytes.
+*/
+const (
+ tagLiteral = 0x00
+ tagCopy1 = 0x01
+ tagCopy2 = 0x02
+ tagCopy4 = 0x03
+)
+
+const (
+ checksumSize = 4
+ chunkHeaderSize = 4
+ magicChunk = "\xff\x06\x00\x00" + magicBody
+ magicBody = "sNaPpY"
+
+ // maxBlockSize is the maximum size of the input to encodeBlock. It is not
+ // part of the wire format per se, but some parts of the encoder assume
+ // that an offset fits into a uint16.
+ //
+ // Also, for the framing format (Writer type instead of Encode function),
+ // https://github.com/google/snappy/blob/master/framing_format.txt says
+ // that "the uncompressed data in a chunk must be no longer than 65536
+ // bytes".
+ maxBlockSize = 65536
+
+ // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is
+ // hard coded to be a const instead of a variable, so that obufLen can also
+ // be a const. Their equivalence is confirmed by
+ // TestMaxEncodedLenOfMaxBlockSize.
+ maxEncodedLenOfMaxBlockSize = 76490
+
+ obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+ obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+ chunkTypeCompressedData = 0x00
+ chunkTypeUncompressedData = 0x01
+ chunkTypePadding = 0xfe
+ chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+ c := crc32.Update(0, crcTable, b)
+ return uint32(c>>15|c<<17) + 0xa282ead8
+}
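
The crc helper above implements the masked checksum from section 3 of the framing format: CRC-32C (Castagnoli), rotated right by 15 bits, plus 0xa282ead8. A hedged standalone sketch of the same computation:

    package main

    import (
        "fmt"
        "hash/crc32"
    )

    var castagnoli = crc32.MakeTable(crc32.Castagnoli)

    // maskedCRC matches the crc helper: rotate the CRC-32C right by 15 bits
    // (c>>15 | c<<17) and add the framing-format constant.
    func maskedCRC(b []byte) uint32 {
        c := crc32.Checksum(b, castagnoli)
        return uint32(c>>15|c<<17) + 0xa282ead8
    }

    func main() {
        fmt.Printf("%#08x\n", maskedCRC([]byte("sNaPpY")))
    }
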
diff --git a/vendor/github.com/google/go-github/github/activity_events.go b/vendor/github.com/google/go-github/github/activity_events.go
index 78219f8..6b35056 100644
--- a/vendor/github.com/google/go-github/github/activity_events.go
+++ b/vendor/github.com/google/go-github/github/activity_events.go
@@ -46,10 +46,10 @@ func (e *Event) ParsePayload() (payload interface{}, err error) {
payload = &ForkEvent{}
case "GollumEvent":
payload = &GollumEvent{}
- case "IntegrationInstallationEvent":
- payload = &IntegrationInstallationEvent{}
- case "IntegrationInstallationRepositoriesEvent":
- payload = &IntegrationInstallationRepositoriesEvent{}
+ case "InstallationEvent":
+ payload = &InstallationEvent{}
+ case "InstallationRepositoriesEvent":
+ payload = &InstallationRepositoriesEvent{}
case "IssueCommentEvent":
payload = &IssueCommentEvent{}
case "IssuesEvent":
@@ -64,6 +64,8 @@ func (e *Event) ParsePayload() (payload interface{}, err error) {
payload = &MilestoneEvent{}
case "OrganizationEvent":
payload = &OrganizationEvent{}
+ case "OrgBlockEvent":
+ payload = &OrgBlockEvent{}
case "PageBuildEvent":
payload = &PageBuildEvent{}
case "PingEvent":
@@ -90,6 +92,8 @@ func (e *Event) ParsePayload() (payload interface{}, err error) {
payload = &RepositoryEvent{}
case "StatusEvent":
payload = &StatusEvent{}
+ case "TeamEvent":
+ payload = &TeamEvent{}
case "TeamAddEvent":
payload = &TeamAddEvent{}
case "WatchEvent":
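
The renamed cases surface through Event.ParsePayload; a hedged sketch of consuming the renamed and newly added payload types with a type switch (the handler name is illustrative; accessors come from the generated github-accessors.go):

    package hooks

    import (
        "log"

        "github.com/google/go-github/github"
    )

    // handleEvent dispatches on the renamed and newly added payload types.
    func handleEvent(e *github.Event) error {
        payload, err := e.ParsePayload()
        if err != nil {
            return err
        }
        switch p := payload.(type) {
        case *github.InstallationEvent:
            log.Printf("installation %s", p.GetAction())
        case *github.InstallationRepositoriesEvent:
            log.Printf("installation repositories %s", p.GetAction())
        case *github.OrgBlockEvent:
            log.Printf("org block %s", p.GetAction())
        case *github.TeamEvent:
            log.Printf("team %s", p.GetAction())
        }
        return nil
    }
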
diff --git a/vendor/github.com/google/go-github/github/integration.go b/vendor/github.com/google/go-github/github/apps.go
index 6d74e44..ff33893 100644
--- a/vendor/github.com/google/go-github/github/integration.go
+++ b/vendor/github.com/google/go-github/github/apps.go
@@ -7,17 +7,17 @@ package github
import "context"
-// IntegrationsService provides access to the installation related functions
+// AppsService provides access to the installation related functions
// in the GitHub API.
//
-// GitHub API docs: https://developer.github.com/v3/integrations/
-type IntegrationsService service
+// GitHub API docs: https://developer.github.com/v3/apps/
+type AppsService service
-// ListInstallations lists the installations that the current integration has.
+// ListInstallations lists the installations that the current GitHub App has.
//
-// GitHub API docs: https://developer.github.com/v3/integrations/#find-installations
-func (s *IntegrationsService) ListInstallations(ctx context.Context, opt *ListOptions) ([]*Installation, *Response, error) {
- u, err := addOptions("integration/installations", opt)
+// GitHub API docs: https://developer.github.com/v3/apps/#find-installations
+func (s *AppsService) ListInstallations(ctx context.Context, opt *ListOptions) ([]*Installation, *Response, error) {
+ u, err := addOptions("app/installations", opt)
if err != nil {
return nil, nil, err
}
diff --git a/vendor/github.com/google/go-github/github/apps_installation.go b/vendor/github.com/google/go-github/github/apps_installation.go
new file mode 100644
index 0000000..5c93291
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/apps_installation.go
@@ -0,0 +1,84 @@
+// Copyright 2016 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "context"
+ "fmt"
+)
+
+// Installation represents a GitHub Apps installation.
+type Installation struct {
+ ID *int `json:"id,omitempty"`
+ Account *User `json:"account,omitempty"`
+ AccessTokensURL *string `json:"access_tokens_url,omitempty"`
+ RepositoriesURL *string `json:"repositories_url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+}
+
+func (i Installation) String() string {
+ return Stringify(i)
+}
+
+// ListRepos lists the repositories that are accessible to the authenticated installation.
+//
+// GitHub API docs: https://developer.github.com/v3/apps/installations/#list-repositories
+func (s *AppsService) ListRepos(ctx context.Context, opt *ListOptions) ([]*Repository, *Response, error) {
+ u, err := addOptions("installation/repositories", opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeIntegrationPreview)
+
+ var r struct {
+ Repositories []*Repository `json:"repositories"`
+ }
+ resp, err := s.client.Do(ctx, req, &r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r.Repositories, resp, nil
+}
+
+// AddRepository adds a single repository to an installation.
+//
+// GitHub API docs: https://developer.github.com/v3/apps/installations/#add-repository-to-installation
+func (s *AppsService) AddRepository(ctx context.Context, instID, repoID int) (*Repository, *Response, error) {
+ u := fmt.Sprintf("apps/installations/%v/repositories/%v", instID, repoID)
+ req, err := s.client.NewRequest("PUT", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ r := new(Repository)
+ resp, err := s.client.Do(ctx, req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r, resp, nil
+}
+
+// RemoveRepository removes a single repository from an installation.
+//
+// GitHub docs: https://developer.github.com/v3/apps/installations/#remove-repository-from-installation
+func (s *AppsService) RemoveRepository(ctx context.Context, instID, repoID int) (*Response, error) {
+ u := fmt.Sprintf("apps/installations/%v/repositories/%v", instID, repoID)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(ctx, req, nil)
+}
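
A hedged sketch of the new AppsService installation helpers; the client is assumed to already be authenticated as an installation (see the ghinstallation example added to doc.go later in this diff):

    package ghapps

    import (
        "context"
        "log"

        "github.com/google/go-github/github"
    )

    // syncRepo adds a repository to an installation and then lists what the
    // installation can access, using the endpoints added above.
    func syncRepo(ctx context.Context, client *github.Client, instID, repoID int) error {
        if _, _, err := client.Apps.AddRepository(ctx, instID, repoID); err != nil {
            return err
        }
        repos, _, err := client.Apps.ListRepos(ctx, &github.ListOptions{PerPage: 100})
        if err != nil {
            return err
        }
        log.Printf("installation %d can now access %d repositories", instID, len(repos))
        return nil
    }
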
diff --git a/vendor/github.com/google/go-github/github/authorizations.go b/vendor/github.com/google/go-github/github/authorizations.go
index 181e83d..638745a 100644
--- a/vendor/github.com/google/go-github/github/authorizations.go
+++ b/vendor/github.com/google/go-github/github/authorizations.go
@@ -63,7 +63,7 @@ type Authorization struct {
App *AuthorizationApp `json:"app,omitempty"`
Note *string `json:"note,omitempty"`
NoteURL *string `json:"note_url,omitempty"`
- UpdateAt *Timestamp `json:"updated_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
CreatedAt *Timestamp `json:"created_at,omitempty"`
Fingerprint *string `json:"fingerprint,omitempty"`
@@ -343,8 +343,13 @@ func (s *AuthorizationsService) Revoke(ctx context.Context, clientID string, tok
// tokens an application has generated for the user.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#list-your-grants
-func (s *AuthorizationsService) ListGrants(ctx context.Context) ([]*Grant, *Response, error) {
- req, err := s.client.NewRequest("GET", "applications/grants", nil)
+func (s *AuthorizationsService) ListGrants(ctx context.Context, opt *ListOptions) ([]*Grant, *Response, error) {
+ u, err := addOptions("applications/grants", opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
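
ListGrants now takes *ListOptions; a hedged sketch of paging through all grants with the standard NextPage pattern (also shown in doc.go later in this diff):

    package ghauth

    import (
        "context"

        "github.com/google/go-github/github"
    )

    // listAllGrants follows resp.NextPage until the last page is reached.
    func listAllGrants(ctx context.Context, client *github.Client) ([]*github.Grant, error) {
        opt := &github.ListOptions{PerPage: 100}
        var all []*github.Grant
        for {
            grants, resp, err := client.Authorizations.ListGrants(ctx, opt)
            if err != nil {
                return nil, err
            }
            all = append(all, grants...)
            if resp.NextPage == 0 {
                break
            }
            opt.Page = resp.NextPage
        }
        return all, nil
    }
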
diff --git a/vendor/github.com/google/go-github/github/doc.go b/vendor/github.com/google/go-github/github/doc.go
index 0acf328..4ba03cb 100644
--- a/vendor/github.com/google/go-github/github/doc.go
+++ b/vendor/github.com/google/go-github/github/doc.go
@@ -63,19 +63,38 @@ See the oauth2 docs for complete instructions on using that library.
For API methods that require HTTP Basic Authentication, use the
BasicAuthTransport.
+GitHub Apps authentication can be provided by the
+https://github.com/bradleyfalzon/ghinstallation package.
+
+ import "github.com/bradleyfalzon/ghinstallation"
+
+ func main() {
+ // Wrap the shared transport for use with the integration ID 1 authenticating with installation ID 99.
+ itr, err := ghinstallation.NewKeyFromFile(http.DefaultTransport, 1, 99, "2016-10-19.private-key.pem")
+ if err != nil {
+ // Handle error.
+ }
+
+ // Use installation transport with client
+ client := github.NewClient(&http.Client{Transport: itr})
+
+ // Use client...
+ }
+
Rate Limiting
GitHub imposes a rate limit on all API clients. Unauthenticated clients are
limited to 60 requests per hour, while authenticated clients can make up to
-5,000 requests per hour. To receive the higher rate limit when making calls
-that are not issued on behalf of a user, use the
-UnauthenticatedRateLimitedTransport.
+5,000 requests per hour. The Search API has a custom rate limit. Unauthenticated
+clients are limited to 10 requests per minute, while authenticated clients
+can make up to 30 requests per minute. To receive the higher rate limit when
+making calls that are not issued on behalf of a user,
+use UnauthenticatedRateLimitedTransport.
-The Rate method on a client returns the rate limit information based on the most
-recent API call. This is updated on every call, but may be out of date if it's
-been some time since the last API call and other clients have made subsequent
-requests since then. You can always call RateLimits() directly to get the most
-up-to-date rate limit data for the client.
+The returned Response.Rate value contains the rate limit information
+from the most recent API call. If a recent enough response isn't
+available, you can use RateLimits to fetch the most up-to-date rate
+limit data for the client.
To detect an API rate limit error, you can check if its type is *github.RateLimitError:
@@ -154,7 +173,7 @@ github.Response struct.
if resp.NextPage == 0 {
break
}
- opt.ListOptions.Page = resp.NextPage
+ opt.Page = resp.NextPage
}
Google App Engine
@@ -164,7 +183,7 @@ the "context" import and still relies on "golang.org/x/net/context".
As a result, if you wish to continue to use "go-github" on App Engine Classic,
you will need to rewrite all the "context" imports using the following command:
- gofmt -w -r '"context" -> "golang.org/x/net/context"' *.go
+ gofmt -w -r '"context" -> "golang.org/x/net/context"' *.go
See "with_appengine.go" for more details.
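
The revised doc text refers to detecting *github.RateLimitError; a hedged sketch of that check (the Rate.Reset field is assumed from the library's Rate type):

    package ghutil

    import (
        "log"

        "github.com/google/go-github/github"
    )

    // isRateLimited reports whether err means GitHub asked us to back off.
    func isRateLimited(err error) bool {
        if rlErr, ok := err.(*github.RateLimitError); ok {
            log.Printf("rate limited; resets at %v", rlErr.Rate.Reset)
            return true
        }
        return false
    }
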
diff --git a/vendor/github.com/google/go-github/github/event_types.go b/vendor/github.com/google/go-github/github/event_types.go
index acf139b..c6b29b9 100644
--- a/vendor/github.com/google/go-github/github/event_types.go
+++ b/vendor/github.com/google/go-github/github/event_types.go
@@ -165,28 +165,49 @@ type ProjectColumnChange struct {
} `json:"name,omitempty"`
}
-// IntegrationInstallationEvent is triggered when an integration is created or deleted.
-// The Webhook event name is "integration_installation".
-//
-// GitHub API docs: https://developer.github.com/early-access/integrations/webhooks/#integrationinstallationevent
-type IntegrationInstallationEvent struct {
- // The action that was performed. Possible values for an "integration_installation"
- // event are: "created", "deleted".
+// TeamChange represents the changes when a team has been edited.
+type TeamChange struct {
+ Description *struct {
+ From *string `json:"from,omitempty"`
+ } `json:"description,omitempty"`
+ Name *struct {
+ From *string `json:"from,omitempty"`
+ } `json:"name,omitempty"`
+ Privacy *struct {
+ From *string `json:"from,omitempty"`
+ } `json:"privacy,omitempty"`
+ Repository *struct {
+ Permissions *struct {
+ From *struct {
+ Admin *bool `json:"admin,omitempty"`
+ Pull *bool `json:"pull,omitempty"`
+ Push *bool `json:"push,omitempty"`
+ } `json:"from,omitempty"`
+ } `json:"permissions,omitempty"`
+ } `json:"repository,omitempty"`
+}
+
+// InstallationEvent is triggered when a GitHub App has been installed or uninstalled.
+// The Webhook event name is "installation".
+//
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#installationevent
+type InstallationEvent struct {
+ // The action that was performed. Can be either "created" or "deleted".
Action *string `json:"action,omitempty"`
Sender *User `json:"sender,omitempty"`
Installation *Installation `json:"installation,omitempty"`
}
-// IntegrationInstallationRepositoriesEvent is triggered when an integration repository
-// is added or removed. The Webhook event name is "integration_installation_repositories".
+// InstallationRepositoriesEvent is triggered when a repository is added or
+// removed from an installation. The Webhook event name is "installation_repositories".
//
-// GitHub API docs: https://developer.github.com/early-access/integrations/webhooks/#integrationinstallationrepositoriesevent
-type IntegrationInstallationRepositoriesEvent struct {
- // The action that was performed. Possible values for an "integration_installation_repositories"
- // event are: "added", "removed".
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#installationrepositoriesevent
+type InstallationRepositoriesEvent struct {
+ // The action that was performed. Can be either "added" or "removed".
Action *string `json:"action,omitempty"`
RepositoriesAdded []*Repository `json:"repositories_added,omitempty"`
RepositoriesRemoved []*Repository `json:"repositories_removed,omitempty"`
+ RepositorySelection *string `json:"repository_selection,omitempty"`
Sender *User `json:"sender,omitempty"`
Installation *Installation `json:"installation,omitempty"`
}
@@ -323,6 +344,22 @@ type OrganizationEvent struct {
Installation *Installation `json:"installation,omitempty"`
}
+// OrgBlockEvent is triggered when an organization blocks or unblocks a user.
+// The Webhook event name is "org_block".
+//
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#orgblockevent
+type OrgBlockEvent struct {
+ // Action is the action that was performed.
+ // Can be "blocked" or "unblocked".
+ Action *string `json:"action,omitempty"`
+ BlockedUser *User `json:"blocked_user,omitempty"`
+ Organization *Organization `json:"organization,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Installation *Installation `json:"installation,omitempty"`
+}
+
// PageBuildEvent represents an attempted build of a GitHub Pages site, whether
// successful or not.
// The Webhook event name is "page_build".
@@ -637,6 +674,25 @@ type StatusEvent struct {
Installation *Installation `json:"installation,omitempty"`
}
+// TeamEvent is triggered when an organization's team is created, modified or deleted.
+// The Webhook event name is "team".
+//
+// Events of this type are not visible in timelines. These events are only used
+// to trigger hooks.
+//
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#teamevent
+type TeamEvent struct {
+ Action *string `json:"action,omitempty"`
+ Team *Team `json:"team,omitempty"`
+ Changes *TeamChange `json:"changes,omitempty"`
+ Repo *Repository `json:"repository,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Org *Organization `json:"organization,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
// TeamAddEvent is triggered when a repository is added to a team.
// The Webhook event name is "team_add".
//
diff --git a/vendor/github.com/google/go-github/github/gists.go b/vendor/github.com/google/go-github/github/gists.go
index e7d6586..15276ea 100644
--- a/vendor/github.com/google/go-github/github/gists.go
+++ b/vendor/github.com/google/go-github/github/gists.go
@@ -241,8 +241,13 @@ func (s *GistsService) Edit(ctx context.Context, id string, gist *Gist) (*Gist,
// ListCommits lists commits of a gist.
//
// GitHub API docs: https://developer.github.com/v3/gists/#list-gist-commits
-func (s *GistsService) ListCommits(ctx context.Context, id string) ([]*GistCommit, *Response, error) {
+func (s *GistsService) ListCommits(ctx context.Context, id string, opt *ListOptions) ([]*GistCommit, *Response, error) {
u := fmt.Sprintf("gists/%v/commits", id)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
diff --git a/vendor/github.com/google/go-github/github/git_refs.go b/vendor/github.com/google/go-github/github/git_refs.go
index bd5df3f..e78fdc6 100644
--- a/vendor/github.com/google/go-github/github/git_refs.go
+++ b/vendor/github.com/google/go-github/github/git_refs.go
@@ -7,6 +7,8 @@ package github
import (
"context"
+ "encoding/json"
+ "errors"
"fmt"
"strings"
)
@@ -45,7 +47,11 @@ type updateRefRequest struct {
Force *bool `json:"force"`
}
-// GetRef fetches the Reference object for a given Git ref.
+// GetRef fetches a single Reference object for a given Git ref.
+// If there is no exact match, GetRef will return an error.
+//
+// Note: The GitHub API can return multiple matches.
+// If you wish to use this functionality, please use the GetRefs() method.
//
// GitHub API docs: https://developer.github.com/v3/git/refs/#get-a-reference
func (s *GitService) GetRef(ctx context.Context, owner string, repo string, ref string) (*Reference, *Response, error) {
@@ -58,13 +64,61 @@ func (s *GitService) GetRef(ctx context.Context, owner string, repo string, ref
r := new(Reference)
resp, err := s.client.Do(ctx, req, r)
- if err != nil {
+ if _, ok := err.(*json.UnmarshalTypeError); ok {
+		// Multiple refs means there wasn't an exact match.
+ return nil, resp, errors.New("no exact match found for this ref")
+ } else if err != nil {
return nil, resp, err
}
return r, resp, nil
}
+// GetRefs fetches a slice of Reference objects for a given Git ref.
+// If there is an exact match, only that ref is returned.
+// If there is no exact match, GitHub returns all refs that start with ref.
+// If the returned error is nil, there will be at least one ref returned.
+// For example:
+//
+// "heads/featureA" -> ["refs/heads/featureA"] // Exact match, single ref is returned.
+// "heads/feature" -> ["refs/heads/featureA", "refs/heads/featureB"] // All refs that start with ref.
+// "heads/notexist" -> [] // Returns an error.
+//
+// GitHub API docs: https://developer.github.com/v3/git/refs/#get-a-reference
+func (s *GitService) GetRefs(ctx context.Context, owner string, repo string, ref string) ([]*Reference, *Response, error) {
+ ref = strings.TrimPrefix(ref, "refs/")
+ u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, ref)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var rawJSON json.RawMessage
+ resp, err := s.client.Do(ctx, req, &rawJSON)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ // Prioritize the most common case: a single returned ref.
+ r := new(Reference)
+ singleUnmarshalError := json.Unmarshal(rawJSON, r)
+ if singleUnmarshalError == nil {
+ return []*Reference{r}, resp, nil
+ }
+
+ // Attempt to unmarshal multiple refs.
+ var rs []*Reference
+ multipleUnmarshalError := json.Unmarshal(rawJSON, &rs)
+ if multipleUnmarshalError == nil {
+ if len(rs) == 0 {
+ return nil, resp, fmt.Errorf("unexpected response from GitHub API: an array of refs with length 0")
+ }
+ return rs, resp, nil
+ }
+
+ return nil, resp, fmt.Errorf("unmarshalling failed for both single and multiple refs: %s and %s", singleUnmarshalError, multipleUnmarshalError)
+}
+
// ReferenceListOptions specifies optional parameters to the
// GitService.ListRefs method.
type ReferenceListOptions struct {
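
A hedged sketch of the new GetRef/GetRefs split: GetRef errors on anything other than an exact match, while GetRefs returns every ref sharing the prefix (the GetRef accessor on Reference is assumed from the generated accessors file):

    package ghrefs

    import (
        "context"

        "github.com/google/go-github/github"
    )

    // featureBranches lists every ref starting with "heads/feature", relying on
    // the prefix-matching behavior documented above.
    func featureBranches(ctx context.Context, client *github.Client, owner, repo string) ([]string, error) {
        refs, _, err := client.Git.GetRefs(ctx, owner, repo, "heads/feature")
        if err != nil {
            return nil, err
        }
        names := make([]string, 0, len(refs))
        for _, r := range refs {
            names = append(names, r.GetRef())
        }
        return names, nil
    }
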
diff --git a/vendor/github.com/google/go-github/github/git_trees.go b/vendor/github.com/google/go-github/github/git_trees.go
index bdd481f..4d6809a 100644
--- a/vendor/github.com/google/go-github/github/git_trees.go
+++ b/vendor/github.com/google/go-github/github/git_trees.go
@@ -30,6 +30,7 @@ type TreeEntry struct {
Type *string `json:"type,omitempty"`
Size *int `json:"size,omitempty"`
Content *string `json:"content,omitempty"`
+ URL *string `json:"url,omitempty"`
}
func (t TreeEntry) String() string {
diff --git a/vendor/github.com/google/go-github/github/github-accessors.go b/vendor/github.com/google/go-github/github/github-accessors.go
index c74c025..6da009b 100644
--- a/vendor/github.com/google/go-github/github/github-accessors.go
+++ b/vendor/github.com/google/go-github/github/github-accessors.go
@@ -20,6 +20,14 @@ func (a *AbuseRateLimitError) GetRetryAfter() time.Duration {
return *a.RetryAfter
}
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (a *AdminEnforcement) GetURL() string {
+ if a == nil || a.URL == nil {
+ return ""
+ }
+ return *a.URL
+}
+
// GetVerifiablePasswordAuthentication returns the VerifiablePasswordAuthentication field if it's non-nil, zero value otherwise.
func (a *APIMeta) GetVerifiablePasswordAuthentication() bool {
if a == nil || a.VerifiablePasswordAuthentication == nil {
@@ -92,12 +100,12 @@ func (a *Authorization) GetTokenLastEight() string {
return *a.TokenLastEight
}
-// GetUpdateAt returns the UpdateAt field if it's non-nil, zero value otherwise.
-func (a *Authorization) GetUpdateAt() Timestamp {
- if a == nil || a.UpdateAt == nil {
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (a *Authorization) GetUpdatedAt() Timestamp {
+ if a == nil || a.UpdatedAt == nil {
return Timestamp{}
}
- return *a.UpdateAt
+ return *a.UpdatedAt
}
// GetURL returns the URL field if it's non-nil, zero value otherwise.
@@ -252,6 +260,38 @@ func (b *Branch) GetProtected() bool {
return *b.Protected
}
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (c *CodeOfConduct) GetBody() string {
+ if c == nil || c.Body == nil {
+ return ""
+ }
+ return *c.Body
+}
+
+// GetKey returns the Key field if it's non-nil, zero value otherwise.
+func (c *CodeOfConduct) GetKey() string {
+ if c == nil || c.Key == nil {
+ return ""
+ }
+ return *c.Key
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (c *CodeOfConduct) GetName() string {
+ if c == nil || c.Name == nil {
+ return ""
+ }
+ return *c.Name
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (c *CodeOfConduct) GetURL() string {
+ if c == nil || c.URL == nil {
+ return ""
+ }
+ return *c.URL
+}
+
// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
func (c *CodeResult) GetHTMLURL() string {
if c == nil || c.HTMLURL == nil {
@@ -556,6 +596,38 @@ func (c *CommitsComparison) GetBehindBy() int {
return *c.BehindBy
}
+// GetDiffURL returns the DiffURL field if it's non-nil, zero value otherwise.
+func (c *CommitsComparison) GetDiffURL() string {
+ if c == nil || c.DiffURL == nil {
+ return ""
+ }
+ return *c.DiffURL
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (c *CommitsComparison) GetHTMLURL() string {
+ if c == nil || c.HTMLURL == nil {
+ return ""
+ }
+ return *c.HTMLURL
+}
+
+// GetPatchURL returns the PatchURL field if it's non-nil, zero value otherwise.
+func (c *CommitsComparison) GetPatchURL() string {
+ if c == nil || c.PatchURL == nil {
+ return ""
+ }
+ return *c.PatchURL
+}
+
+// GetPermalinkURL returns the PermalinkURL field if it's non-nil, zero value otherwise.
+func (c *CommitsComparison) GetPermalinkURL() string {
+ if c == nil || c.PermalinkURL == nil {
+ return ""
+ }
+ return *c.PermalinkURL
+}
+
// GetStatus returns the Status field if it's non-nil, zero value otherwise.
func (c *CommitsComparison) GetStatus() string {
if c == nil || c.Status == nil {
@@ -572,6 +644,14 @@ func (c *CommitsComparison) GetTotalCommits() int {
return *c.TotalCommits
}
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (c *CommitsComparison) GetURL() string {
+ if c == nil || c.URL == nil {
+ return ""
+ }
+ return *c.URL
+}
+
// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise.
func (c *CommitsSearchResult) GetIncompleteResults() bool {
if c == nil || c.IncompleteResults == nil {
@@ -612,6 +692,22 @@ func (c *CommitStats) GetTotal() int {
return *c.Total
}
+// GetHealthPercentage returns the HealthPercentage field if it's non-nil, zero value otherwise.
+func (c *CommunityHealthMetrics) GetHealthPercentage() int {
+ if c == nil || c.HealthPercentage == nil {
+ return 0
+ }
+ return *c.HealthPercentage
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (c *CommunityHealthMetrics) GetUpdatedAt() time.Time {
+ if c == nil || c.UpdatedAt == nil {
+ return time.Time{}
+ }
+ return *c.UpdatedAt
+}
+
// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise.
func (c *Contributor) GetAvatarURL() string {
if c == nil || c.AvatarURL == nil {
@@ -1917,7 +2013,7 @@ func (i *Installation) GetRepositoriesURL() string {
}
// GetAction returns the Action field if it's non-nil, zero value otherwise.
-func (i *IntegrationInstallationEvent) GetAction() string {
+func (i *InstallationEvent) GetAction() string {
if i == nil || i.Action == nil {
return ""
}
@@ -1925,13 +2021,21 @@ func (i *IntegrationInstallationEvent) GetAction() string {
}
// GetAction returns the Action field if it's non-nil, zero value otherwise.
-func (i *IntegrationInstallationRepositoriesEvent) GetAction() string {
+func (i *InstallationRepositoriesEvent) GetAction() string {
if i == nil || i.Action == nil {
return ""
}
return *i.Action
}
+// GetRepositorySelection returns the RepositorySelection field if it's non-nil, zero value otherwise.
+func (i *InstallationRepositoriesEvent) GetRepositorySelection() string {
+ if i == nil || i.RepositorySelection == nil {
+ return ""
+ }
+ return *i.RepositorySelection
+}
+
// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
func (i *Invitation) GetCreatedAt() time.Time {
if i == nil || i.CreatedAt == nil {
@@ -1996,6 +2100,14 @@ func (i *Issue) GetComments() int {
return *i.Comments
}
+// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise.
+func (i *Issue) GetCommentsURL() string {
+ if i == nil || i.CommentsURL == nil {
+ return ""
+ }
+ return *i.CommentsURL
+}
+
// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
func (i *Issue) GetCreatedAt() time.Time {
if i == nil || i.CreatedAt == nil {
@@ -2004,6 +2116,14 @@ func (i *Issue) GetCreatedAt() time.Time {
return *i.CreatedAt
}
+// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise.
+func (i *Issue) GetEventsURL() string {
+ if i == nil || i.EventsURL == nil {
+ return ""
+ }
+ return *i.EventsURL
+}
+
// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
func (i *Issue) GetHTMLURL() string {
if i == nil || i.HTMLURL == nil {
@@ -2020,6 +2140,14 @@ func (i *Issue) GetID() int {
return *i.ID
}
+// GetLabelsURL returns the LabelsURL field if it's non-nil, zero value otherwise.
+func (i *Issue) GetLabelsURL() string {
+ if i == nil || i.LabelsURL == nil {
+ return ""
+ }
+ return *i.LabelsURL
+}
+
// GetLocked returns the Locked field if it's non-nil, zero value otherwise.
func (i *Issue) GetLocked() bool {
if i == nil || i.Locked == nil {
@@ -2036,6 +2164,14 @@ func (i *Issue) GetNumber() int {
return *i.Number
}
+// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise.
+func (i *Issue) GetRepositoryURL() string {
+ if i == nil || i.RepositoryURL == nil {
+ return ""
+ }
+ return *i.RepositoryURL
+}
+
// GetState returns the State field if it's non-nil, zero value otherwise.
func (i *Issue) GetState() string {
if i == nil || i.State == nil {
@@ -2548,6 +2684,38 @@ func (m *MembershipEvent) GetScope() string {
return *m.Scope
}
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (m *Metric) GetHTMLURL() string {
+ if m == nil || m.HTMLURL == nil {
+ return ""
+ }
+ return *m.HTMLURL
+}
+
+// GetKey returns the Key field if it's non-nil, zero value otherwise.
+func (m *Metric) GetKey() string {
+ if m == nil || m.Key == nil {
+ return ""
+ }
+ return *m.Key
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (m *Metric) GetName() string {
+ if m == nil || m.Name == nil {
+ return ""
+ }
+ return *m.Name
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (m *Metric) GetURL() string {
+ if m == nil || m.URL == nil {
+ return ""
+ }
+ return *m.URL
+}
+
// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
func (m *Migration) GetCreatedAt() string {
if m == nil || m.CreatedAt == nil {
@@ -3109,6 +3277,14 @@ func (o *OrganizationEvent) GetAction() string {
}
// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (o *OrgBlockEvent) GetAction() string {
+ if o == nil || o.Action == nil {
+ return ""
+ }
+ return *o.Action
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
func (p *Page) GetAction() string {
if p == nil || p.Action == nil {
return ""
@@ -3372,6 +3548,14 @@ func (p *Project) GetURL() string {
return *p.URL
}
+// GetColumnID returns the ColumnID field if it's non-nil, zero value otherwise.
+func (p *ProjectCard) GetColumnID() int {
+ if p == nil || p.ColumnID == nil {
+ return 0
+ }
+ return *p.ColumnID
+}
+
// GetColumnURL returns the ColumnURL field if it's non-nil, zero value otherwise.
func (p *ProjectCard) GetColumnURL() string {
if p == nil || p.ColumnURL == nil {
@@ -3420,6 +3604,14 @@ func (p *ProjectCard) GetUpdatedAt() Timestamp {
return *p.UpdatedAt
}
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (p *ProjectCard) GetURL() string {
+ if p == nil || p.URL == nil {
+ return ""
+ }
+ return *p.URL
+}
+
// GetAction returns the Action field if it's non-nil, zero value otherwise.
func (p *ProjectCardEvent) GetAction() string {
if p == nil || p.Action == nil {
@@ -3612,6 +3804,14 @@ func (p *PullRequest) GetMergeable() bool {
return *p.Mergeable
}
+// GetMergeCommitSHA returns the MergeCommitSHA field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetMergeCommitSHA() string {
+ if p == nil || p.MergeCommitSHA == nil {
+ return ""
+ }
+ return *p.MergeCommitSHA
+}
+
// GetMerged returns the Merged field if it's non-nil, zero value otherwise.
func (p *PullRequest) GetMerged() bool {
if p == nil || p.Merged == nil {
@@ -3996,6 +4196,14 @@ func (p *PullRequestReviewRequest) GetBody() string {
return *p.Body
}
+// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise.
+func (p *PullRequestReviewRequest) GetCommitID() string {
+ if p == nil || p.CommitID == nil {
+ return ""
+ }
+ return *p.CommitID
+}
+
// GetEvent returns the Event field if it's non-nil, zero value otherwise.
func (p *PullRequestReviewRequest) GetEvent() string {
if p == nil || p.Event == nil {
@@ -4004,6 +4212,14 @@ func (p *PullRequestReviewRequest) GetEvent() string {
return *p.Event
}
+// GetDismissStaleReviews returns the DismissStaleReviews field if it's non-nil, zero value otherwise.
+func (p *PullRequestReviewsEnforcementUpdate) GetDismissStaleReviews() bool {
+ if p == nil || p.DismissStaleReviews == nil {
+ return false
+ }
+ return *p.DismissStaleReviews
+}
+
// GetBase returns the Base field if it's non-nil, zero value otherwise.
func (p *pullRequestUpdate) GetBase() string {
if p == nil || p.Base == nil {
@@ -6292,6 +6508,14 @@ func (t *Team) GetURL() string {
return *t.URL
}
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (t *TeamEvent) GetAction() string {
+ if t == nil || t.Action == nil {
+ return ""
+ }
+ return *t.Action
+}
+
// GetDescription returns the Description field if it's non-nil, zero value otherwise.
func (t *TeamLDAPMapping) GetDescription() string {
if t == nil || t.Description == nil {
@@ -6620,6 +6844,14 @@ func (t *TreeEntry) GetType() string {
return *t.Type
}
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (t *TreeEntry) GetURL() string {
+ if t == nil || t.URL == nil {
+ return ""
+ }
+ return *t.URL
+}
+
// GetForce returns the Force field if it's non-nil, zero value otherwise.
func (u *updateRefRequest) GetForce() bool {
if u == nil || u.Force == nil {
diff --git a/vendor/github.com/google/go-github/github/github.go b/vendor/github.com/google/go-github/github/github.go
index decdaaa..54769bc 100644
--- a/vendor/github.com/google/go-github/github/github.go
+++ b/vendor/github.com/google/go-github/github/github.go
@@ -27,7 +27,7 @@ import (
)
const (
- libraryVersion = "6"
+ libraryVersion = "13"
defaultBaseURL = "https://api.github.com/"
uploadBaseURL = "https://uploads.github.com/"
userAgent = "go-github/" + libraryVersion
@@ -67,10 +67,6 @@ const (
// https://developer.github.com/changes/2016-05-12-reactions-api-preview/
mediaTypeReactionsPreview = "application/vnd.github.squirrel-girl-preview"
- // https://developer.github.com/changes/2016-04-01-squash-api-preview/
- // https://developer.github.com/changes/2016-09-26-pull-request-merge-api-update/
- mediaTypeSquashPreview = "application/vnd.github.polaris-preview+json"
-
// https://developer.github.com/changes/2016-04-04-git-signing-api-preview/
mediaTypeGitSigningPreview = "application/vnd.github.cryptographer-preview+json"
@@ -92,11 +88,20 @@ const (
// https://developer.github.com/changes/2017-01-05-commit-search-api/
mediaTypeCommitSearchPreview = "application/vnd.github.cloak-preview+json"
- // https://developer.github.com/changes/2016-12-14-reviews-api/
- mediaTypePullRequestReviewsPreview = "application/vnd.github.black-cat-preview+json"
-
// https://developer.github.com/changes/2017-02-28-user-blocking-apis-and-webhook/
mediaTypeBlockUsersPreview = "application/vnd.github.giant-sentry-fist-preview+json"
+
+ // https://developer.github.com/changes/2017-02-09-community-health/
+ mediaTypeRepositoryCommunityHealthMetricsPreview = "application/vnd.github.black-panther-preview+json"
+
+ // https://developer.github.com/changes/2017-05-23-coc-api/
+ mediaTypeCodesOfConductPreview = "application/vnd.github.scarlet-witch-preview+json"
+
+ // https://developer.github.com/changes/2017-07-17-update-topics-on-repositories/
+ mediaTypeTopicsPreview = "application/vnd.github.mercy-preview+json"
+
+ // https://developer.github.com/changes/2017-07-26-team-review-request-thor-preview/
+ mediaTypeTeamReviewPreview = "application/vnd.github.thor-preview+json"
)
// A Client manages communication with the GitHub API.
@@ -123,11 +128,11 @@ type Client struct {
// Services used for talking to different parts of the GitHub API.
Activity *ActivityService
Admin *AdminService
+ Apps *AppsService
Authorizations *AuthorizationsService
Gists *GistsService
Git *GitService
Gitignores *GitignoresService
- Integrations *IntegrationsService
Issues *IssuesService
Organizations *OrganizationsService
Projects *ProjectsService
@@ -212,11 +217,11 @@ func NewClient(httpClient *http.Client) *Client {
c.common.client = c
c.Activity = (*ActivityService)(&c.common)
c.Admin = (*AdminService)(&c.common)
+ c.Apps = (*AppsService)(&c.common)
c.Authorizations = (*AuthorizationsService)(&c.common)
c.Gists = (*GistsService)(&c.common)
c.Git = (*GitService)(&c.common)
c.Gitignores = (*GitignoresService)(&c.common)
- c.Integrations = (*IntegrationsService)(&c.common)
c.Issues = (*IssuesService)(&c.common)
c.Licenses = (*LicensesService)(&c.common)
c.Migrations = (*MigrationService)(&c.common)
@@ -236,13 +241,14 @@ func NewClient(httpClient *http.Client) *Client {
// specified, the value pointed to by body is JSON encoded and included as the
// request body.
func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Request, error) {
- rel, err := url.Parse(urlStr)
+ if !strings.HasSuffix(c.BaseURL.Path, "/") {
+ return nil, fmt.Errorf("BaseURL must have a trailing slash, but %q does not", c.BaseURL)
+ }
+ u, err := c.BaseURL.Parse(urlStr)
if err != nil {
return nil, err
}
- u := c.BaseURL.ResolveReference(rel)
-
var buf io.ReadWriter
if body != nil {
buf = new(bytes.Buffer)
@@ -271,12 +277,14 @@ func (c *Client) NewRequest(method, urlStr string, body interface{}) (*http.Requ
// urlStr, in which case it is resolved relative to the UploadURL of the Client.
// Relative URLs should always be specified without a preceding slash.
func (c *Client) NewUploadRequest(urlStr string, reader io.Reader, size int64, mediaType string) (*http.Request, error) {
- rel, err := url.Parse(urlStr)
+ if !strings.HasSuffix(c.UploadURL.Path, "/") {
+ return nil, fmt.Errorf("UploadURL must have a trailing slash, but %q does not", c.UploadURL)
+ }
+ u, err := c.UploadURL.Parse(urlStr)
if err != nil {
return nil, err
}
- u := c.UploadURL.ResolveReference(rel)
req, err := http.NewRequest("POST", u.String(), reader)
if err != nil {
return nil, err
@@ -312,6 +320,7 @@ type Response struct {
}
// newResponse creates a new Response for the provided http.Response.
+// r must not be nil.
func newResponse(r *http.Response) *Response {
response := &Response{Response: r}
response.populatePageValues()
@@ -390,13 +399,16 @@ func parseRate(r *http.Response) Rate {
// The provided ctx must be non-nil. If it is canceled or times out,
// ctx.Err() will be returned.
func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Response, error) {
- ctx, req = withContext(ctx, req)
+ req = withContext(ctx, req)
rateLimitCategory := category(req.URL.Path)
// If we've hit rate limit, don't make further requests before Reset time.
if err := c.checkRateLimitBeforeDo(req, rateLimitCategory); err != nil {
- return nil, err
+ return &Response{
+ Response: err.Response,
+ Rate: err.Rate,
+ }, err
}
resp, err := c.client.Do(req)
@@ -457,7 +469,7 @@ func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Res
// current client state in order to quickly check if *RateLimitError can be immediately returned
// from Client.Do, and if so, returns it so that Client.Do can skip making a network API call unnecessarily.
// Otherwise it returns nil, and Client.Do should proceed normally.
-func (c *Client) checkRateLimitBeforeDo(req *http.Request, rateLimitCategory rateLimitCategory) error {
+func (c *Client) checkRateLimitBeforeDo(req *http.Request, rateLimitCategory rateLimitCategory) *RateLimitError {
c.rateMu.Lock()
rate := c.rateLimits[rateLimitCategory]
c.rateMu.Unlock()
@@ -524,9 +536,9 @@ type RateLimitError struct {
}
func (r *RateLimitError) Error() string {
- return fmt.Sprintf("%v %v: %d %v; rate reset in %v",
+ return fmt.Sprintf("%v %v: %d %v %v",
r.Response.Request.Method, sanitizeURL(r.Response.Request.URL),
- r.Response.StatusCode, r.Message, r.Rate.Reset.Time.Sub(time.Now()))
+ r.Response.StatusCode, r.Message, formatRateReset(r.Rate.Reset.Time.Sub(time.Now())))
}
// AcceptedError occurs when GitHub returns 202 Accepted response with an
@@ -874,6 +886,31 @@ func cloneRequest(r *http.Request) *http.Request {
return r2
}
+// formatRateReset formats d to look like "[rate reset in 2s]" or
+// "[rate reset in 87m02s]" for positive durations, and like
+// "[rate limit was reset 87m02s ago]" for negative durations.
+func formatRateReset(d time.Duration) string {
+ isNegative := d < 0
+ if isNegative {
+ d *= -1
+ }
+ secondsTotal := int(0.5 + d.Seconds())
+ minutes := secondsTotal / 60
+ seconds := secondsTotal - minutes*60
+
+ var timeString string
+ if minutes > 0 {
+ timeString = fmt.Sprintf("%dm%02ds", minutes, seconds)
+ } else {
+ timeString = fmt.Sprintf("%ds", seconds)
+ }
+
+ if isNegative {
+ return fmt.Sprintf("[rate limit was reset %v ago]", timeString)
+ }
+ return fmt.Sprintf("[rate reset in %v]", timeString)
+}
+
// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool { return &v }
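
With the NewRequest change above, Client.BaseURL must end in a trailing slash instead of relying on ResolveReference behaviour. A quick, non-authoritative sketch of what that looks like from calling code; the Enterprise host below is a placeholder, not a real endpoint:

package main

import (
	"fmt"
	"net/url"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // unauthenticated client

	// NewRequest now rejects a BaseURL without a trailing slash rather than
	// silently resolving relative URLs against it.
	base, _ := url.Parse("https://example-ghe.internal/api/v3/") // note the trailing slash
	client.BaseURL = base

	req, err := client.NewRequest("GET", "rate_limit", nil)
	if err != nil {
		fmt.Println("request error:", err)
		return
	}
	fmt.Println("resolved URL:", req.URL)
}
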
diff --git a/vendor/github.com/google/go-github/github/integration_installation.go b/vendor/github.com/google/go-github/github/integration_installation.go
deleted file mode 100644
index 0836d86..0000000
--- a/vendor/github.com/google/go-github/github/integration_installation.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package github
-
-import "context"
-
-// Installation represents a GitHub integration installation.
-type Installation struct {
- ID *int `json:"id,omitempty"`
- Account *User `json:"account,omitempty"`
- AccessTokensURL *string `json:"access_tokens_url,omitempty"`
- RepositoriesURL *string `json:"repositories_url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
-}
-
-func (i Installation) String() string {
- return Stringify(i)
-}
-
-// ListRepos lists the repositories that the current installation has access to.
-//
-// GitHub API docs: https://developer.github.com/v3/integrations/installations/#list-repositories
-func (s *IntegrationsService) ListRepos(ctx context.Context, opt *ListOptions) ([]*Repository, *Response, error) {
- u, err := addOptions("installation/repositories", opt)
- if err != nil {
- return nil, nil, err
- }
-
- req, err := s.client.NewRequest("GET", u, nil)
- if err != nil {
- return nil, nil, err
- }
-
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeIntegrationPreview)
-
- var r struct {
- Repositories []*Repository `json:"repositories"`
- }
- resp, err := s.client.Do(ctx, req, &r)
- if err != nil {
- return nil, resp, err
- }
-
- return r.Repositories, resp, nil
-}
diff --git a/vendor/github.com/google/go-github/github/issues.go b/vendor/github.com/google/go-github/github/issues.go
index b437d50..9b7a9d6 100644
--- a/vendor/github.com/google/go-github/github/issues.go
+++ b/vendor/github.com/google/go-github/github/issues.go
@@ -23,6 +23,7 @@ type IssuesService service
// but not every issue is a pull request. Some endpoints, events, and webhooks
// may also return pull requests via this struct. If PullRequestLinks is nil,
// this is an issue, and if PullRequestLinks is not nil, this is a pull request.
+// The IsPullRequest helper method can be used to check that.
type Issue struct {
ID *int `json:"id,omitempty"`
Number *int `json:"number,omitempty"`
@@ -40,6 +41,10 @@ type Issue struct {
ClosedBy *User `json:"closed_by,omitempty"`
URL *string `json:"url,omitempty"`
HTMLURL *string `json:"html_url,omitempty"`
+ CommentsURL *string `json:"comments_url,omitempty"`
+ EventsURL *string `json:"events_url,omitempty"`
+ LabelsURL *string `json:"labels_url,omitempty"`
+ RepositoryURL *string `json:"repository_url,omitempty"`
Milestone *Milestone `json:"milestone,omitempty"`
PullRequestLinks *PullRequestLinks `json:"pull_request,omitempty"`
Repository *Repository `json:"repository,omitempty"`
@@ -55,6 +60,13 @@ func (i Issue) String() string {
return Stringify(i)
}
+// IsPullRequest reports whether the issue is also a pull request. It uses the
+// method recommended by GitHub's API documentation, which is to check whether
+// PullRequestLinks is non-nil.
+func (i Issue) IsPullRequest() bool {
+ return i.PullRequestLinks != nil
+}
+
// IssueRequest represents a request to create/edit an issue.
// It is separate from Issue above because otherwise Labels
// and Assignee fail to serialize to the correct JSON.
@@ -97,7 +109,7 @@ type IssueListOptions struct {
}
// PullRequestLinks object is added to the Issue object when it's an issue included
-// in the IssueCommentEvent webhook payload, if the webhooks is fired by a comment on a PR
+// in the IssueCommentEvent webhook payload, if the webhook is fired by a comment on a PR.
type PullRequestLinks struct {
URL *string `json:"url,omitempty"`
HTMLURL *string `json:"html_url,omitempty"`
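
The new Issue.IsPullRequest helper replaces manual nil checks on PullRequestLinks. A small illustrative filter (not part of the library) that keeps only true issues from a mixed list returned by the Issues API:

package main

import (
	"fmt"

	"github.com/google/go-github/github"
)

// issuesOnly is a hypothetical helper that drops pull requests from a list
// of issues, since the Issues API returns both kinds of records.
func issuesOnly(items []*github.Issue) []*github.Issue {
	var out []*github.Issue
	for _, i := range items {
		if !i.IsPullRequest() {
			out = append(out, i)
		}
	}
	return out
}

func main() {
	pr := &github.Issue{PullRequestLinks: &github.PullRequestLinks{}}
	plain := &github.Issue{}
	fmt.Println(len(issuesOnly([]*github.Issue{pr, plain}))) // 1
}
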
diff --git a/vendor/github.com/google/go-github/github/issues_events.go b/vendor/github.com/google/go-github/github/issues_events.go
index 2d5e19a..93e5d66 100644
--- a/vendor/github.com/google/go-github/github/issues_events.go
+++ b/vendor/github.com/google/go-github/github/issues_events.go
@@ -44,7 +44,7 @@ type IssueEvent struct {
// Someone unspecified @mentioned the Actor [sic] in an issue comment body.
//
// assigned, unassigned
- // The Actor assigned the issue to or removed the assignment from the Assignee.
+ // The Assigner assigned the issue to or removed the assignment from the Assignee.
//
// labeled, unlabeled
// The Actor added or removed the Label from the issue.
diff --git a/vendor/github.com/google/go-github/github/issues_labels.go b/vendor/github.com/google/go-github/github/issues_labels.go
index a39001d..b0e34c4 100644
--- a/vendor/github.com/google/go-github/github/issues_labels.go
+++ b/vendor/github.com/google/go-github/github/issues_labels.go
@@ -19,7 +19,7 @@ type Label struct {
}
func (l Label) String() string {
- return fmt.Sprint(*l.Name)
+ return Stringify(l)
}
// ListLabels lists all labels for a repository.
diff --git a/vendor/github.com/google/go-github/github/messages.go b/vendor/github.com/google/go-github/github/messages.go
index a7ec65f..3a4d464 100644
--- a/vendor/github.com/google/go-github/github/messages.go
+++ b/vendor/github.com/google/go-github/github/messages.go
@@ -20,6 +20,7 @@ import (
"hash"
"io/ioutil"
"net/http"
+ "net/url"
"strings"
)
@@ -33,42 +34,46 @@ const (
signatureHeader = "X-Hub-Signature"
// eventTypeHeader is the GitHub header key used to pass the event type.
eventTypeHeader = "X-Github-Event"
+ // deliveryIDHeader is the GitHub header key used to pass the unique ID for the webhook event.
+ deliveryIDHeader = "X-Github-Delivery"
)
var (
// eventTypeMapping maps webhooks types to their corresponding go-github struct types.
eventTypeMapping = map[string]string{
- "commit_comment": "CommitCommentEvent",
- "create": "CreateEvent",
- "delete": "DeleteEvent",
- "deployment": "DeploymentEvent",
- "deployment_status": "DeploymentStatusEvent",
- "fork": "ForkEvent",
- "gollum": "GollumEvent",
- "integration_installation": "IntegrationInstallationEvent",
- "integration_installation_repositories": "IntegrationInstallationRepositoriesEvent",
- "issue_comment": "IssueCommentEvent",
- "issues": "IssuesEvent",
- "label": "LabelEvent",
- "member": "MemberEvent",
- "membership": "MembershipEvent",
- "milestone": "MilestoneEvent",
- "organization": "OrganizationEvent",
- "page_build": "PageBuildEvent",
- "ping": "PingEvent",
- "project": "ProjectEvent",
- "project_card": "ProjectCardEvent",
- "project_column": "ProjectColumnEvent",
- "public": "PublicEvent",
- "pull_request_review": "PullRequestReviewEvent",
- "pull_request_review_comment": "PullRequestReviewCommentEvent",
- "pull_request": "PullRequestEvent",
- "push": "PushEvent",
- "repository": "RepositoryEvent",
- "release": "ReleaseEvent",
- "status": "StatusEvent",
- "team_add": "TeamAddEvent",
- "watch": "WatchEvent",
+ "commit_comment": "CommitCommentEvent",
+ "create": "CreateEvent",
+ "delete": "DeleteEvent",
+ "deployment": "DeploymentEvent",
+ "deployment_status": "DeploymentStatusEvent",
+ "fork": "ForkEvent",
+ "gollum": "GollumEvent",
+ "installation": "InstallationEvent",
+ "installation_repositories": "InstallationRepositoriesEvent",
+ "issue_comment": "IssueCommentEvent",
+ "issues": "IssuesEvent",
+ "label": "LabelEvent",
+ "member": "MemberEvent",
+ "membership": "MembershipEvent",
+ "milestone": "MilestoneEvent",
+ "organization": "OrganizationEvent",
+ "org_block": "OrgBlockEvent",
+ "page_build": "PageBuildEvent",
+ "ping": "PingEvent",
+ "project": "ProjectEvent",
+ "project_card": "ProjectCardEvent",
+ "project_column": "ProjectColumnEvent",
+ "public": "PublicEvent",
+ "pull_request_review": "PullRequestReviewEvent",
+ "pull_request_review_comment": "PullRequestReviewCommentEvent",
+ "pull_request": "PullRequestEvent",
+ "push": "PushEvent",
+ "repository": "RepositoryEvent",
+ "release": "ReleaseEvent",
+ "status": "StatusEvent",
+ "team": "TeamEvent",
+ "team_add": "TeamAddEvent",
+ "watch": "WatchEvent",
}
)
@@ -118,6 +123,8 @@ func messageMAC(signature string) ([]byte, func() hash.Hash, error) {
// ValidatePayload validates an incoming GitHub Webhook event request
// and returns the (JSON) payload.
+// The Content-Type header of the payload can be "application/json" or "application/x-www-form-urlencoded".
+// If the Content-Type is neither of those, an error is returned.
// secretKey is the GitHub Webhook secret message.
//
// Example usage:
@@ -129,13 +136,43 @@ func messageMAC(signature string) ([]byte, func() hash.Hash, error) {
// }
//
func ValidatePayload(r *http.Request, secretKey []byte) (payload []byte, err error) {
- payload, err = ioutil.ReadAll(r.Body)
- if err != nil {
- return nil, err
+ var body []byte // Raw body that GitHub uses to calculate the signature.
+
+ switch ct := r.Header.Get("Content-Type"); ct {
+ case "application/json":
+ var err error
+ if body, err = ioutil.ReadAll(r.Body); err != nil {
+ return nil, err
+ }
+
+ // If the content type is application/json,
+ // the JSON payload is just the original body.
+ payload = body
+
+ case "application/x-www-form-urlencoded":
+ // payloadFormParam is the name of the form parameter that the JSON payload
+ // will be in if a webhook has its content type set to application/x-www-form-urlencoded.
+ const payloadFormParam = "payload"
+
+ var err error
+ if body, err = ioutil.ReadAll(r.Body); err != nil {
+ return nil, err
+ }
+
+ // If the content type is application/x-www-form-urlencoded,
+ // the JSON payload will be under the "payload" form param.
+ form, err := url.ParseQuery(string(body))
+ if err != nil {
+ return nil, err
+ }
+ payload = []byte(form.Get(payloadFormParam))
+
+ default:
+ return nil, fmt.Errorf("Webhook request has unsupported Content-Type %q", ct)
}
sig := r.Header.Get(signatureHeader)
- if err := validateSignature(sig, payload, secretKey); err != nil {
+ if err := validateSignature(sig, body, secretKey); err != nil {
return nil, err
}
return payload, nil
@@ -159,10 +196,19 @@ func validateSignature(signature string, payload, secretKey []byte) error {
}
// WebHookType returns the event type of webhook request r.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/hooks/#webhook-headers
func WebHookType(r *http.Request) string {
return r.Header.Get(eventTypeHeader)
}
+// DeliveryID returns the unique delivery ID of webhook request r.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/hooks/#webhook-headers
+func DeliveryID(r *http.Request) string {
+ return r.Header.Get(deliveryIDHeader)
+}
+
// ParseWebHook parses the event payload. For recognized event types, a
// value of the corresponding struct type will be returned (as returned
// by Event.ParsePayload()). An error will be returned for unrecognized event
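
The webhook changes above (form-encoded payload support in ValidatePayload, the DeliveryID helper, and the new "team"/"installation" event mappings) are easiest to see in a handler. A minimal sketch; the secret and port are placeholders:

package main

import (
	"log"
	"net/http"

	"github.com/google/go-github/github"
)

// secret is a placeholder webhook secret; in real use it comes from configuration.
var secret = []byte("my-webhook-secret")

func handleWebhook(w http.ResponseWriter, r *http.Request) {
	// ValidatePayload now accepts both application/json and
	// application/x-www-form-urlencoded deliveries and verifies the
	// signature against the raw request body.
	payload, err := github.ValidatePayload(r, secret)
	if err != nil {
		http.Error(w, "invalid payload", http.StatusBadRequest)
		return
	}

	log.Printf("delivery %s, event %s", github.DeliveryID(r), github.WebHookType(r))

	event, err := github.ParseWebHook(github.WebHookType(r), payload)
	if err != nil {
		http.Error(w, "unsupported event", http.StatusBadRequest)
		return
	}

	switch e := event.(type) {
	case *github.TeamEvent: // the new "team" mapping added above
		log.Printf("team event action: %s", e.GetAction())
	default:
		log.Printf("ignoring %T", e)
	}
}

func main() {
	http.HandleFunc("/webhook", handleWebhook)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
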
diff --git a/vendor/github.com/google/go-github/github/misc.go b/vendor/github.com/google/go-github/github/misc.go
index 42d0d30..5b8082d 100644
--- a/vendor/github.com/google/go-github/github/misc.go
+++ b/vendor/github.com/google/go-github/github/misc.go
@@ -83,6 +83,61 @@ func (c *Client) ListEmojis(ctx context.Context) (map[string]string, *Response,
return emoji, resp, nil
}
+// CodeOfConduct represents a code of conduct.
+type CodeOfConduct struct {
+ Name *string `json:"name,omitempty"`
+ Key *string `json:"key,omitempty"`
+ URL *string `json:"url,omitempty"`
+ Body *string `json:"body,omitempty"`
+}
+
+func (c *CodeOfConduct) String() string {
+ return Stringify(c)
+}
+
+// ListCodesOfConduct returns all codes of conduct.
+//
+// GitHub API docs: https://developer.github.com/v3/codes_of_conduct/#list-all-codes-of-conduct
+func (c *Client) ListCodesOfConduct(ctx context.Context) ([]*CodeOfConduct, *Response, error) {
+ req, err := c.NewRequest("GET", "codes_of_conduct", nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeCodesOfConductPreview)
+
+ var cs []*CodeOfConduct
+ resp, err := c.Do(ctx, req, &cs)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return cs, resp, nil
+}
+
+// GetCodeOfConduct returns an individual code of conduct.
+//
+// https://developer.github.com/v3/codes_of_conduct/#get-an-individual-code-of-conduct
+func (c *Client) GetCodeOfConduct(ctx context.Context, key string) (*CodeOfConduct, *Response, error) {
+ u := fmt.Sprintf("codes_of_conduct/%s", key)
+ req, err := c.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeCodesOfConductPreview)
+
+ coc := new(CodeOfConduct)
+ resp, err := c.Do(ctx, req, coc)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return coc, resp, nil
+}
+
// APIMeta represents metadata about the GitHub API.
type APIMeta struct {
// An Array of IP addresses in CIDR format specifying the addresses
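
The two codes-of-conduct calls added above can be exercised without authentication; both print via the Stringify-backed String method. The "contributor_covenant" key is the commonly documented one and should be treated as illustrative:

package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// List every code of conduct GitHub exposes (preview API at the time).
	codes, _, err := client.ListCodesOfConduct(ctx)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, c := range codes {
		fmt.Println(c)
	}

	// Fetch a single code of conduct by key.
	coc, _, err := client.GetCodeOfConduct(ctx, "contributor_covenant")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(coc)
}
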
diff --git a/vendor/github.com/google/go-github/github/orgs.go b/vendor/github.com/google/go-github/github/orgs.go
index 8b126f0..e6947c9 100644
--- a/vendor/github.com/google/go-github/github/orgs.go
+++ b/vendor/github.com/google/go-github/github/orgs.go
@@ -154,6 +154,25 @@ func (s *OrganizationsService) Get(ctx context.Context, org string) (*Organizati
return organization, resp, nil
}
+// GetByID fetches an organization.
+//
+// Note: GetByID uses the undocumented GitHub API endpoint /organizations/:id.
+func (s *OrganizationsService) GetByID(ctx context.Context, id int) (*Organization, *Response, error) {
+ u := fmt.Sprintf("organizations/%d", id)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ organization := new(Organization)
+ resp, err := s.client.Do(ctx, req, organization)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return organization, resp, nil
+}
+
// Edit an organization.
//
// GitHub API docs: https://developer.github.com/v3/orgs/#edit-an-organization
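
A short sketch of the new OrganizationsService.GetByID call; the numeric ID is only an example, and the endpoint is undocumented as the comment above notes:

package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// GetByID hits the undocumented /organizations/:id endpoint added above.
	org, _, err := client.Organizations.GetByID(ctx, 9919) // example ID
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	if org.Login != nil {
		fmt.Println("login:", *org.Login)
	}
}
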
diff --git a/vendor/github.com/google/go-github/github/orgs_members.go b/vendor/github.com/google/go-github/github/orgs_members.go
index f1209c7..d0ea6a9 100644
--- a/vendor/github.com/google/go-github/github/orgs_members.go
+++ b/vendor/github.com/google/go-github/github/orgs_members.go
@@ -278,7 +278,7 @@ func (s *OrganizationsService) RemoveOrgMembership(ctx context.Context, user, or
// ListPendingOrgInvitations returns a list of pending invitations.
//
// GitHub API docs: https://developer.github.com/v3/orgs/members/#list-pending-organization-invitations
-func (s *OrganizationsService) ListPendingOrgInvitations(ctx context.Context, org int, opt *ListOptions) ([]*Invitation, *Response, error) {
+func (s *OrganizationsService) ListPendingOrgInvitations(ctx context.Context, org string, opt *ListOptions) ([]*Invitation, *Response, error) {
u := fmt.Sprintf("orgs/%v/invitations", org)
u, err := addOptions(u, opt)
if err != nil {
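
ListPendingOrgInvitations now takes the organization name rather than a numeric ID. A hedged usage sketch with placeholder names; a real call needs an authenticated client with org admin scope:

package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	invites, _, err := client.Organizations.ListPendingOrgInvitations(
		ctx, "example-org", &github.ListOptions{PerPage: 50})
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(len(invites), "pending invitations")
}
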
diff --git a/vendor/github.com/google/go-github/github/orgs_teams.go b/vendor/github.com/google/go-github/github/orgs_teams.go
index 70b090d..684e2da 100644
--- a/vendor/github.com/google/go-github/github/orgs_teams.go
+++ b/vendor/github.com/google/go-github/github/orgs_teams.go
@@ -223,6 +223,9 @@ func (s *OrganizationsService) ListTeamRepos(ctx context.Context, team int, opt
return nil, nil, err
}
+ // TODO: remove custom Accept header when topics API fully launches.
+ req.Header.Set("Accept", mediaTypeTopicsPreview)
+
var repos []*Repository
resp, err := s.client.Do(ctx, req, &repos)
if err != nil {
diff --git a/vendor/github.com/google/go-github/github/orgs_users_blocking.go b/vendor/github.com/google/go-github/github/orgs_users_blocking.go
new file mode 100644
index 0000000..b1aecf4
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/orgs_users_blocking.go
@@ -0,0 +1,91 @@
+// Copyright 2017 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "context"
+ "fmt"
+)
+
+// ListBlockedUsers lists all the users blocked by an organization.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/blocking/#list-blocked-users
+func (s *OrganizationsService) ListBlockedUsers(ctx context.Context, org string, opt *ListOptions) ([]*User, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/blocks", org)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeBlockUsersPreview)
+
+ var blockedUsers []*User
+ resp, err := s.client.Do(ctx, req, &blockedUsers)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return blockedUsers, resp, nil
+}
+
+// IsBlocked reports whether the specified user is blocked from an organization.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/blocking/#check-whether-a-user-is-blocked-from-an-organization
+func (s *OrganizationsService) IsBlocked(ctx context.Context, org string, user string) (bool, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/blocks/%v", org, user)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return false, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeBlockUsersPreview)
+
+ resp, err := s.client.Do(ctx, req, nil)
+ isBlocked, err := parseBoolResponse(err)
+ return isBlocked, resp, err
+}
+
+// BlockUser blocks the specified user from an organization.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/blocking/#block-a-user
+func (s *OrganizationsService) BlockUser(ctx context.Context, org string, user string) (*Response, error) {
+ u := fmt.Sprintf("orgs/%v/blocks/%v", org, user)
+
+ req, err := s.client.NewRequest("PUT", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeBlockUsersPreview)
+
+ return s.client.Do(ctx, req, nil)
+}
+
+// UnblockUser unblocks the specified user from an organization.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/blocking/#unblock-a-user
+func (s *OrganizationsService) UnblockUser(ctx context.Context, org string, user string) (*Response, error) {
+ u := fmt.Sprintf("orgs/%v/blocks/%v", org, user)
+
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeBlockUsersPreview)
+
+ return s.client.Do(ctx, req, nil)
+}
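
The new org user-blocking calls above fit together as block / check / unblock. A minimal sketch with placeholder org and user names; the real calls require an authenticated client with admin rights:

package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // placeholder; blocking calls need admin auth

	org, user := "example-org", "example-user" // placeholders

	if _, err := client.Organizations.BlockUser(ctx, org, user); err != nil {
		fmt.Println("block failed:", err)
		return
	}

	blocked, _, err := client.Organizations.IsBlocked(ctx, org, user)
	if err != nil {
		fmt.Println("check failed:", err)
		return
	}
	fmt.Println("blocked:", blocked)

	if _, err := client.Organizations.UnblockUser(ctx, org, user); err != nil {
		fmt.Println("unblock failed:", err)
	}
}
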
diff --git a/vendor/github.com/google/go-github/github/projects.go b/vendor/github.com/google/go-github/github/projects.go
index 58b638e..a9b143d 100644
--- a/vendor/github.com/google/go-github/github/projects.go
+++ b/vendor/github.com/google/go-github/github/projects.go
@@ -65,6 +65,12 @@ type ProjectOptions struct {
Name string `json:"name,omitempty"`
// The body of the project. (Optional.)
Body string `json:"body,omitempty"`
+
+ // The following field(s) are only applicable for update.
+ // They should be left with zero values for creation.
+
+ // State of the project. Either "open" or "closed". (Optional.)
+ State string `json:"state,omitempty"`
}
// UpdateProject updates a repository project.
@@ -259,14 +265,19 @@ func (s *ProjectsService) MoveProjectColumn(ctx context.Context, columnID int, o
// ProjectCard represents a card in a column of a GitHub Project.
//
-// GitHub API docs: https://developer.github.com/v3/repos/projects/
+// GitHub API docs: https://developer.github.com/v3/projects/cards/#get-a-project-card
type ProjectCard struct {
+ URL *string `json:"url,omitempty"`
ColumnURL *string `json:"column_url,omitempty"`
ContentURL *string `json:"content_url,omitempty"`
ID *int `json:"id,omitempty"`
Note *string `json:"note,omitempty"`
+ Creator *User `json:"creator,omitempty"`
CreatedAt *Timestamp `json:"created_at,omitempty"`
UpdatedAt *Timestamp `json:"updated_at,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ ColumnID *int `json:"column_id,omitempty"`
}
// ListProjectCards lists the cards in a column of a GitHub Project.
@@ -324,10 +335,10 @@ func (s *ProjectsService) GetProjectCard(ctx context.Context, columnID int) (*Pr
type ProjectCardOptions struct {
// The note of the card. Note and ContentID are mutually exclusive.
Note string `json:"note,omitempty"`
- // The ID (not Number) of the Issue or Pull Request to associate with this card.
+ // The ID (not Number) of the Issue to associate with this card.
// Note and ContentID are mutually exclusive.
ContentID int `json:"content_id,omitempty"`
- // The type of content to associate with this card. Possible values are: "Issue", "PullRequest".
+ // The type of content to associate with this card. Possible values are: "Issue".
ContentType string `json:"content_type,omitempty"`
}
diff --git a/vendor/github.com/google/go-github/github/pulls.go b/vendor/github.com/google/go-github/github/pulls.go
index 0fdb4cd..bc46081 100644
--- a/vendor/github.com/google/go-github/github/pulls.go
+++ b/vendor/github.com/google/go-github/github/pulls.go
@@ -33,6 +33,7 @@ type PullRequest struct {
Merged *bool `json:"merged,omitempty"`
Mergeable *bool `json:"mergeable,omitempty"`
MergedBy *User `json:"merged_by,omitempty"`
+ MergeCommitSHA *string `json:"merge_commit_sha,omitempty"`
Comments *int `json:"comments,omitempty"`
Commits *int `json:"commits,omitempty"`
Additions *int `json:"additions,omitempty"`
@@ -253,6 +254,9 @@ func (s *PullRequestsService) ListCommits(ctx context.Context, owner string, rep
return nil, nil, err
}
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeGitSigningPreview)
+
var commits []*RepositoryCommit
resp, err := s.client.Do(ctx, req, &commits)
if err != nil {
@@ -342,9 +346,6 @@ func (s *PullRequestsService) Merge(ctx context.Context, owner string, repo stri
return nil, nil, err
}
- // TODO: This header will be unnecessary when the API is no longer in preview.
- req.Header.Set("Accept", mediaTypeSquashPreview)
-
mergeResult := new(PullRequestMergeResult)
resp, err := s.client.Do(ctx, req, mergeResult)
if err != nil {
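
The new MergeCommitSHA field pairs with the GetMergeCommitSHA accessor added earlier in this diff. An illustrative helper (not part of the library) that works on any *PullRequest, whether fetched from the API or decoded from a webhook payload:

package main

import (
	"fmt"

	"github.com/google/go-github/github"
)

// reportMergeCommit prints the merge commit SHA if GitHub has recorded one.
func reportMergeCommit(pr *github.PullRequest) {
	if sha := pr.GetMergeCommitSHA(); sha != "" {
		fmt.Println("merge commit:", sha)
	} else {
		fmt.Println("no merge commit recorded yet")
	}
}

func main() {
	sha := "0000000000000000000000000000000000000000" // placeholder SHA
	reportMergeCommit(&github.PullRequest{MergeCommitSHA: &sha})
	reportMergeCommit(&github.PullRequest{})
}
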
diff --git a/vendor/github.com/google/go-github/github/pulls_reviewers.go b/vendor/github.com/google/go-github/github/pulls_reviewers.go
index efa3888..15b0d84 100644
--- a/vendor/github.com/google/go-github/github/pulls_reviewers.go
+++ b/vendor/github.com/google/go-github/github/pulls_reviewers.go
@@ -10,24 +10,30 @@ import (
"fmt"
)
-// RequestReviewers creates a review request for the provided GitHub users for the specified pull request.
+// ReviewersRequest specifies users and teams for a pull request review request.
+type ReviewersRequest struct {
+ Reviewers []string `json:"reviewers,omitempty"`
+ TeamReviewers []string `json:"team_reviewers,omitempty"`
+}
+
+// Reviewers represents reviewers of a pull request.
+type Reviewers struct {
+ Users []*User `json:"users,omitempty"`
+ Teams []*Team `json:"teams,omitempty"`
+}
+
+// RequestReviewers creates a review request for the provided reviewers for the specified pull request.
//
// GitHub API docs: https://developer.github.com/v3/pulls/review_requests/#create-a-review-request
-func (s *PullRequestsService) RequestReviewers(ctx context.Context, owner, repo string, number int, logins []string) (*PullRequest, *Response, error) {
+func (s *PullRequestsService) RequestReviewers(ctx context.Context, owner, repo string, number int, reviewers ReviewersRequest) (*PullRequest, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/pulls/%d/requested_reviewers", owner, repo, number)
-
- reviewers := struct {
- Reviewers []string `json:"reviewers,omitempty"`
- }{
- Reviewers: logins,
- }
req, err := s.client.NewRequest("POST", u, &reviewers)
if err != nil {
return nil, nil, err
}
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeTeamReviewPreview)
r := new(PullRequest)
resp, err := s.client.Do(ctx, req, r)
@@ -38,47 +44,45 @@ func (s *PullRequestsService) RequestReviewers(ctx context.Context, owner, repo
return r, resp, nil
}
-// ListReviewers lists users whose reviews have been requested on the specified pull request.
+// ListReviewers lists reviewers whose reviews have been requested on the specified pull request.
//
// GitHub API docs: https://developer.github.com/v3/pulls/review_requests/#list-review-requests
-func (s *PullRequestsService) ListReviewers(ctx context.Context, owner, repo string, number int) ([]*User, *Response, error) {
+func (s *PullRequestsService) ListReviewers(ctx context.Context, owner, repo string, number int, opt *ListOptions) (*Reviewers, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/requested_reviewers", owner, repo, number)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeTeamReviewPreview)
- var users []*User
- resp, err := s.client.Do(ctx, req, &users)
+ reviewers := new(Reviewers)
+ resp, err := s.client.Do(ctx, req, reviewers)
if err != nil {
return nil, resp, err
}
- return users, resp, nil
+ return reviewers, resp, nil
}
-// RemoveReviewers removes the review request for the provided GitHub users for the specified pull request.
+// RemoveReviewers removes the review request for the provided reviewers for the specified pull request.
//
// GitHub API docs: https://developer.github.com/v3/pulls/review_requests/#delete-a-review-request
-func (s *PullRequestsService) RemoveReviewers(ctx context.Context, owner, repo string, number int, logins []string) (*Response, error) {
+func (s *PullRequestsService) RemoveReviewers(ctx context.Context, owner, repo string, number int, reviewers ReviewersRequest) (*Response, error) {
u := fmt.Sprintf("repos/%s/%s/pulls/%d/requested_reviewers", owner, repo, number)
-
- reviewers := struct {
- Reviewers []string `json:"reviewers,omitempty"`
- }{
- Reviewers: logins,
- }
req, err := s.client.NewRequest("DELETE", u, &reviewers)
if err != nil {
return nil, err
}
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeTeamReviewPreview)
return s.client.Do(ctx, req, reviewers)
}
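
The review-request calls now take a ReviewersRequest (which can carry team slugs) and ListReviewers returns a Reviewers value with both users and teams. A hedged sketch with placeholder owner, repo, number, and names:

package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // real calls need an authenticated client

	req := github.ReviewersRequest{
		Reviewers:     []string{"octocat"},      // placeholder login
		TeamReviewers: []string{"backend-team"}, // placeholder team slug
	}

	if _, _, err := client.PullRequests.RequestReviewers(ctx, "example-owner", "example-repo", 1, req); err != nil {
		fmt.Println("error:", err)
		return
	}

	reviewers, _, err := client.PullRequests.ListReviewers(ctx, "example-owner", "example-repo", 1, nil)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(len(reviewers.Users), "users,", len(reviewers.Teams), "teams")
}
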
diff --git a/vendor/github.com/google/go-github/github/pulls_reviews.go b/vendor/github.com/google/go-github/github/pulls_reviews.go
index c27b6a8..7d2acc4 100644
--- a/vendor/github.com/google/go-github/github/pulls_reviews.go
+++ b/vendor/github.com/google/go-github/github/pulls_reviews.go
@@ -40,6 +40,7 @@ func (c DraftReviewComment) String() string {
// PullRequestReviewRequest represents a request to create a review.
type PullRequestReviewRequest struct {
+ CommitID *string `json:"commit_id,omitempty"`
Body *string `json:"body,omitempty"`
Event *string `json:"event,omitempty"`
Comments []*DraftReviewComment `json:"comments,omitempty"`
@@ -65,17 +66,18 @@ func (r PullRequestReviewDismissalRequest) String() string {
// Read more about it here - https://github.com/google/go-github/issues/540
//
// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#list-reviews-on-a-pull-request
-func (s *PullRequestsService) ListReviews(ctx context.Context, owner, repo string, number int) ([]*PullRequestReview, *Response, error) {
+func (s *PullRequestsService) ListReviews(ctx context.Context, owner, repo string, number int, opt *ListOptions) ([]*PullRequestReview, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews", owner, repo, number)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
-
var reviews []*PullRequestReview
resp, err := s.client.Do(ctx, req, &reviews)
if err != nil {
@@ -100,9 +102,6 @@ func (s *PullRequestsService) GetReview(ctx context.Context, owner, repo string,
return nil, nil, err
}
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
-
review := new(PullRequestReview)
resp, err := s.client.Do(ctx, req, review)
if err != nil {
@@ -127,9 +126,6 @@ func (s *PullRequestsService) DeletePendingReview(ctx context.Context, owner, re
return nil, nil, err
}
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
-
review := new(PullRequestReview)
resp, err := s.client.Do(ctx, req, review)
if err != nil {
@@ -145,18 +141,19 @@ func (s *PullRequestsService) DeletePendingReview(ctx context.Context, owner, re
// returned error format and remove this comment once it's fixed.
// Read more about it here - https://github.com/google/go-github/issues/540
//
-// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#get-a-single-reviews-comments
-func (s *PullRequestsService) ListReviewComments(ctx context.Context, owner, repo string, number, reviewID int) ([]*PullRequestComment, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#get-comments-for-a-single-review
+func (s *PullRequestsService) ListReviewComments(ctx context.Context, owner, repo string, number, reviewID int, opt *ListOptions) ([]*PullRequestComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d/comments", owner, repo, number, reviewID)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
-
var comments []*PullRequestComment
resp, err := s.client.Do(ctx, req, &comments)
if err != nil {
@@ -181,9 +178,6 @@ func (s *PullRequestsService) CreateReview(ctx context.Context, owner, repo stri
return nil, nil, err
}
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
-
r := new(PullRequestReview)
resp, err := s.client.Do(ctx, req, r)
if err != nil {
@@ -208,9 +202,6 @@ func (s *PullRequestsService) SubmitReview(ctx context.Context, owner, repo stri
return nil, nil, err
}
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
-
r := new(PullRequestReview)
resp, err := s.client.Do(ctx, req, r)
if err != nil {
@@ -235,9 +226,6 @@ func (s *PullRequestsService) DismissReview(ctx context.Context, owner, repo str
return nil, nil, err
}
- // TODO: remove custom Accept header when this API fully launches
- req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
-
r := new(PullRequestReview)
resp, err := s.client.Do(ctx, req, r)
if err != nil {
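
ListReviews (and the other review listing calls above) now accept *ListOptions, so they paginate like the rest of the library. A sketch of the usual pagination loop, with placeholder owner, repo, and number:

package main

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	opt := &github.ListOptions{PerPage: 100}
	var all []*github.PullRequestReview
	for {
		reviews, resp, err := client.PullRequests.ListReviews(ctx, "example-owner", "example-repo", 1, opt)
		if err != nil {
			fmt.Println("error:", err)
			return
		}
		all = append(all, reviews...)
		if resp.NextPage == 0 {
			break
		}
		opt.Page = resp.NextPage
	}
	fmt.Println(len(all), "reviews")
}
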
diff --git a/vendor/github.com/google/go-github/github/repos.go b/vendor/github.com/google/go-github/github/repos.go
index 058f149..e3fe26f 100644
--- a/vendor/github.com/google/go-github/github/repos.go
+++ b/vendor/github.com/google/go-github/github/repos.go
@@ -7,6 +7,7 @@ package github
import (
"context"
+ "encoding/json"
"fmt"
"strings"
)
@@ -25,6 +26,7 @@ type Repository struct {
FullName *string `json:"full_name,omitempty"`
Description *string `json:"description,omitempty"`
Homepage *string `json:"homepage,omitempty"`
+ CodeOfConduct *CodeOfConduct `json:"code_of_conduct,omitempty"`
DefaultBranch *string `json:"default_branch,omitempty"`
MasterBranch *string `json:"master_branch,omitempty"`
CreatedAt *Timestamp `json:"created_at,omitempty"`
@@ -53,6 +55,7 @@ type Repository struct {
AllowRebaseMerge *bool `json:"allow_rebase_merge,omitempty"`
AllowSquashMerge *bool `json:"allow_squash_merge,omitempty"`
AllowMergeCommit *bool `json:"allow_merge_commit,omitempty"`
+ Topics []string `json:"topics,omitempty"`
// Only provided when using RepositoriesService.Get while in preview
License *License `json:"license,omitempty"`
@@ -173,8 +176,9 @@ func (s *RepositoriesService) List(ctx context.Context, user string, opt *Reposi
return nil, nil, err
}
- // TODO: remove custom Accept header when license support fully launches
- req.Header.Set("Accept", mediaTypeLicensesPreview)
+ // TODO: remove custom Accept headers when APIs fully launch.
+ acceptHeaders := []string{mediaTypeLicensesPreview, mediaTypeCodesOfConductPreview, mediaTypeTopicsPreview}
+ req.Header.Set("Accept", strings.Join(acceptHeaders, ", "))
var repos []*Repository
resp, err := s.client.Do(ctx, req, &repos)
@@ -210,8 +214,9 @@ func (s *RepositoriesService) ListByOrg(ctx context.Context, org string, opt *Re
return nil, nil, err
}
- // TODO: remove custom Accept header when license support fully launches
- req.Header.Set("Accept", mediaTypeLicensesPreview)
+ // TODO: remove custom Accept headers when APIs fully launch.
+ acceptHeaders := []string{mediaTypeLicensesPreview, mediaTypeCodesOfConductPreview, mediaTypeTopicsPreview}
+ req.Header.Set("Accept", strings.Join(acceptHeaders, ", "))
var repos []*Repository
resp, err := s.client.Do(ctx, req, &repos)
@@ -227,8 +232,6 @@ func (s *RepositoriesService) ListByOrg(ctx context.Context, org string, opt *Re
type RepositoryListAllOptions struct {
// ID of the last repository seen
Since int `url:"since,omitempty"`
-
- ListOptions
}
// ListAll lists all GitHub repositories in the order that they were created.
@@ -293,7 +296,7 @@ func (s *RepositoriesService) Get(ctx context.Context, owner, repo string) (*Rep
// TODO: remove custom Accept header when the license support fully launches
// https://developer.github.com/v3/licenses/#get-a-repositorys-license
- acceptHeaders := []string{mediaTypeLicensesPreview, mediaTypeSquashPreview}
+ acceptHeaders := []string{mediaTypeLicensesPreview, mediaTypeCodesOfConductPreview, mediaTypeTopicsPreview}
req.Header.Set("Accept", strings.Join(acceptHeaders, ", "))
repository := new(Repository)
@@ -305,6 +308,28 @@ func (s *RepositoriesService) Get(ctx context.Context, owner, repo string) (*Rep
return repository, resp, nil
}
+// GetCodeOfConduct gets the contents of a repository's code of conduct.
+//
+// GitHub API docs: https://developer.github.com/v3/codes_of_conduct/#get-the-contents-of-a-repositorys-code-of-conduct
+func (s *RepositoriesService) GetCodeOfConduct(ctx context.Context, owner, repo string) (*CodeOfConduct, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/community/code_of_conduct", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeCodesOfConductPreview)
+
+ coc := new(CodeOfConduct)
+ resp, err := s.client.Do(ctx, req, coc)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return coc, resp, nil
+}
+
// GetByID fetches a repository.
//
// Note: GetByID uses the undocumented GitHub API endpoint /repositories/:id.
@@ -338,9 +363,6 @@ func (s *RepositoriesService) Edit(ctx context.Context, owner, repo string, repo
return nil, nil, err
}
- // TODO: Remove this preview header after API is fully vetted.
- req.Header.Set("Accept", mediaTypeSquashPreview)
-
r := new(Repository)
resp, err := s.client.Do(ctx, req, r)
if err != nil {
@@ -509,22 +531,22 @@ type Branch struct {
// Protection represents a repository branch's protection.
type Protection struct {
- RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"`
- RequiredPullRequestReviews *RequiredPullRequestReviews `json:"required_pull_request_reviews"`
- Restrictions *BranchRestrictions `json:"restrictions"`
+ RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"`
+ RequiredPullRequestReviews *PullRequestReviewsEnforcement `json:"required_pull_request_reviews"`
+ EnforceAdmins *AdminEnforcement `json:"enforce_admins"`
+ Restrictions *BranchRestrictions `json:"restrictions"`
}
// ProtectionRequest represents a request to create/edit a branch's protection.
type ProtectionRequest struct {
- RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"`
- RequiredPullRequestReviews *RequiredPullRequestReviews `json:"required_pull_request_reviews"`
- Restrictions *BranchRestrictionsRequest `json:"restrictions"`
+ RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks"`
+ RequiredPullRequestReviews *PullRequestReviewsEnforcementRequest `json:"required_pull_request_reviews"`
+ EnforceAdmins bool `json:"enforce_admins"`
+ Restrictions *BranchRestrictionsRequest `json:"restrictions"`
}
 // RequiredStatusChecks represents the protection status of an individual branch.
type RequiredStatusChecks struct {
- // Enforce required status checks for repository administrators. (Required.)
- IncludeAdmins bool `json:"include_admins"`
// Require branches to be up to date before merging. (Required.)
Strict bool `json:"strict"`
// The list of status checks to require in order to merge into this
@@ -532,10 +554,71 @@ type RequiredStatusChecks struct {
Contexts []string `json:"contexts"`
}
-// RequiredPullRequestReviews represents the protection configuration for pull requests.
-type RequiredPullRequestReviews struct {
- // Enforce pull request reviews for repository administrators. (Required.)
- IncludeAdmins bool `json:"include_admins"`
+// PullRequestReviewsEnforcement represents the pull request reviews enforcement of a protected branch.
+type PullRequestReviewsEnforcement struct {
+ // Specifies which users and teams can dismiss pull request reviews.
+ DismissalRestrictions DismissalRestrictions `json:"dismissal_restrictions"`
+ // Specifies if approved reviews are dismissed automatically when a new commit is pushed.
+ DismissStaleReviews bool `json:"dismiss_stale_reviews"`
+ // RequireCodeOwnerReviews specifies if an approved review is required in pull requests including files with a designated code owner.
+ RequireCodeOwnerReviews bool `json:"require_code_owner_reviews"`
+}
+
+// PullRequestReviewsEnforcementRequest represents a request to set the pull request review
+// enforcement of a protected branch. It is separate from PullRequestReviewsEnforcement above
+// because the request structure is different from the response structure.
+type PullRequestReviewsEnforcementRequest struct {
+ // Specifies which users and teams should be allowed to dismiss pull request reviews. Can be nil to disable the restrictions.
+ DismissalRestrictionsRequest *DismissalRestrictionsRequest `json:"dismissal_restrictions"`
+ // Specifies if approved reviews can be dismissed automatically when a new commit is pushed. (Required)
+ DismissStaleReviews bool `json:"dismiss_stale_reviews"`
+ // RequireCodeOwnerReviews specifies if an approved review is required in pull requests including files with a designated code owner.
+ RequireCodeOwnerReviews bool `json:"require_code_owner_reviews"`
+}
+
+// MarshalJSON implements the json.Marshaler interface.
+// It converts a nil value of PullRequestReviewsEnforcementRequest.DismissalRestrictionsRequest to an empty array.
+func (req PullRequestReviewsEnforcementRequest) MarshalJSON() ([]byte, error) {
+ if req.DismissalRestrictionsRequest == nil {
+ newReq := struct {
+ R []interface{} `json:"dismissal_restrictions"`
+ D bool `json:"dismiss_stale_reviews"`
+ O bool `json:"require_code_owner_reviews"`
+ }{
+ R: []interface{}{},
+ D: req.DismissStaleReviews,
+ O: req.RequireCodeOwnerReviews,
+ }
+ return json.Marshal(newReq)
+ }
+ newReq := struct {
+ R *DismissalRestrictionsRequest `json:"dismissal_restrictions"`
+ D bool `json:"dismiss_stale_reviews"`
+ O bool `json:"require_code_owner_reviews"`
+ }{
+ R: req.DismissalRestrictionsRequest,
+ D: req.DismissStaleReviews,
+ O: req.RequireCodeOwnerReviews,
+ }
+ return json.Marshal(newReq)
+}
+
+// PullRequestReviewsEnforcementUpdate represents a request to patch the pull request review
+// enforcement of a protected branch. It is separate from PullRequestReviewsEnforcementRequest above
+// because the patch request does not require all fields to be initialized.
+type PullRequestReviewsEnforcementUpdate struct {
+ // Specifies which users and teams can dismiss pull request reviews. Can be omitted.
+ DismissalRestrictionsRequest *DismissalRestrictionsRequest `json:"dismissal_restrictions,omitempty"`
+ // Specifies if approved reviews can be dismissed automatically when a new commit is pushed. Can be omitted.
+ DismissStaleReviews *bool `json:"dismiss_stale_reviews,omitempty"`
+ // RequireCodeOwnerReviews specifies if an approved review is required in pull requests including files with a designated code owner.
+ RequireCodeOwnerReviews bool `json:"require_code_owner_reviews,omitempty"`
+}
+
+// AdminEnforcement represents the configuration to enforce required status checks for repository administrators.
+type AdminEnforcement struct {
+ URL *string `json:"url,omitempty"`
+ Enabled bool `json:"enabled"`
}
// BranchRestrictions represents the restriction that only certain users or
@@ -558,6 +641,25 @@ type BranchRestrictionsRequest struct {
Teams []string `json:"teams"`
}
+// DismissalRestrictions specifies which users and teams can dismiss pull request reviews.
+type DismissalRestrictions struct {
+ // The list of users who can dismiss pull request reviews.
+ Users []*User `json:"users"`
+ // The list of teams which can dismiss pull request reviews.
+ Teams []*Team `json:"teams"`
+}
+
+// DismissalRestrictionsRequest represents the request to create/edit the
+// restriction that allows only specific users or teams to dismiss pull request reviews. It is
+// separate from DismissalRestrictions above because the request structure is
+// different from the response structure.
+type DismissalRestrictionsRequest struct {
+ // The list of user logins who can dismiss pull request reviews. (Required; use []string{} instead of nil for empty list.)
+ Users []string `json:"users"`
+ // The list of team slugs which can dismiss pull request reviews. (Required; use []string{} instead of nil for empty list.)
+ Teams []string `json:"teams"`
+}
+
// ListBranches lists branches for the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/#list-branches
@@ -728,3 +830,202 @@ func (s *RepositoriesService) License(ctx context.Context, owner, repo string) (
return r, resp, nil
}
+
+// GetPullRequestReviewEnforcement gets pull request review enforcement of a protected branch.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#get-pull-request-review-enforcement-of-protected-branch
+func (s *RepositoriesService) GetPullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string) (*PullRequestReviewsEnforcement, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ r := new(PullRequestReviewsEnforcement)
+ resp, err := s.client.Do(ctx, req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r, resp, nil
+}
+
+// UpdatePullRequestReviewEnforcement patches pull request review enforcement of a protected branch.
+// It requires admin access and branch protection to be enabled.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#update-pull-request-review-enforcement-of-protected-branch
+func (s *RepositoriesService) UpdatePullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string, patch *PullRequestReviewsEnforcementUpdate) (*PullRequestReviewsEnforcement, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch)
+ req, err := s.client.NewRequest("PATCH", u, patch)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ r := new(PullRequestReviewsEnforcement)
+ resp, err := s.client.Do(ctx, req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r, resp, err
+}
+
+// DisableDismissalRestrictions disables dismissal restrictions of a protected branch.
+// It requires admin access and branch protection to be enabled.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#update-pull-request-review-enforcement-of-protected-branch
+func (s *RepositoriesService) DisableDismissalRestrictions(ctx context.Context, owner, repo, branch string) (*PullRequestReviewsEnforcement, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch)
+
+ data := struct {
+ R []interface{} `json:"dismissal_restrictions"`
+ }{[]interface{}{}}
+
+ req, err := s.client.NewRequest("PATCH", u, data)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ r := new(PullRequestReviewsEnforcement)
+ resp, err := s.client.Do(ctx, req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r, resp, err
+}
+
+// RemovePullRequestReviewEnforcement removes pull request enforcement of a protected branch.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#remove-pull-request-review-enforcement-of-protected-branch
+func (s *RepositoriesService) RemovePullRequestReviewEnforcement(ctx context.Context, owner, repo, branch string) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_pull_request_reviews", owner, repo, branch)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ return s.client.Do(ctx, req, nil)
+}
+
+// GetAdminEnforcement gets admin enforcement information of a protected branch.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#get-admin-enforcement-of-protected-branch
+func (s *RepositoriesService) GetAdminEnforcement(ctx context.Context, owner, repo, branch string) (*AdminEnforcement, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ r := new(AdminEnforcement)
+ resp, err := s.client.Do(ctx, req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r, resp, nil
+}
+
+// AddAdminEnforcement adds admin enforcement to a protected branch.
+// It requires admin access and branch protection to be enabled.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#add-admin-enforcement-of-protected-branch
+func (s *RepositoriesService) AddAdminEnforcement(ctx context.Context, owner, repo, branch string) (*AdminEnforcement, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch)
+ req, err := s.client.NewRequest("POST", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ r := new(AdminEnforcement)
+ resp, err := s.client.Do(ctx, req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r, resp, err
+}
+
+// RemoveAdminEnforcement removes admin enforcement from a protected branch.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#remove-admin-enforcement-of-protected-branch
+func (s *RepositoriesService) RemoveAdminEnforcement(ctx context.Context, owner, repo, branch string) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/enforce_admins", owner, repo, branch)
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ return s.client.Do(ctx, req, nil)
+}
+
+// Topics represents a collection of repository topics.
+type Topics struct {
+ Names []string `json:"names,omitempty"`
+}
+
+// ListAllTopics lists topics for a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/#list-all-topics-for-a-repository
+func (s *RepositoriesService) ListAllTopics(ctx context.Context, owner, repo string) (*Topics, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/topics", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeTopicsPreview)
+
+ topics := new(Topics)
+ resp, err := s.client.Do(ctx, req, topics)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return topics, resp, nil
+}
+
+// ReplaceAllTopics replaces topics for a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/#replace-all-topics-for-a-repository
+func (s *RepositoriesService) ReplaceAllTopics(ctx context.Context, owner, repo string, topics *Topics) (*Topics, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/topics", owner, repo)
+ req, err := s.client.NewRequest("PUT", u, topics)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeTopicsPreview)
+
+ t := new(Topics)
+ resp, err := s.client.Do(ctx, req, t)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return t, resp, nil
+}
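The new topics endpoints above can be exercised as in the following sketch. It is only an illustration: the owner/repo names are placeholders, github.NewClient(nil) is unauthenticated, and ReplaceAllTopics needs an authenticated client with push access in practice.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // pass an oauth2-backed *http.Client for authenticated calls

	// Replace the full topic list of a hypothetical repository.
	topics := &github.Topics{Names: []string{"golang", "api-client"}}
	if _, _, err := client.Repositories.ReplaceAllTopics(ctx, "example-owner", "example-repo", topics); err != nil {
		log.Fatal(err)
	}

	// Read the topics back with ListAllTopics.
	got, _, err := client.Repositories.ListAllTopics(ctx, "example-owner", "example-repo")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(got.Names)
}
```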
diff --git a/vendor/github.com/google/go-github/github/repos_collaborators.go b/vendor/github.com/google/go-github/github/repos_collaborators.go
index 76e8a1f..100a925 100644
--- a/vendor/github.com/google/go-github/github/repos_collaborators.go
+++ b/vendor/github.com/google/go-github/github/repos_collaborators.go
@@ -10,10 +10,26 @@ import (
"fmt"
)
+// ListCollaboratorsOptions specifies the optional parameters to the
+// RepositoriesService.ListCollaborators method.
+type ListCollaboratorsOptions struct {
+ // Affiliation specifies how collaborators should be filtered by their affiliation.
+ // Possible values are:
+ // outside - All outside collaborators of an organization-owned repository
+ // direct - All collaborators with permissions to an organization-owned repository,
+ // regardless of organization membership status
+ // all - All collaborators the authenticated user can see
+ //
+ // Default value is "all".
+ Affiliation string `url:"affiliation,omitempty"`
+
+ ListOptions
+}
+
// ListCollaborators lists the GitHub users that have access to the repository.
//
-// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#list
-func (s *RepositoriesService) ListCollaborators(ctx context.Context, owner, repo string, opt *ListOptions) ([]*User, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#list-collaborators
+func (s *RepositoriesService) ListCollaborators(ctx context.Context, owner, repo string, opt *ListCollaboratorsOptions) ([]*User, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/collaborators", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -91,7 +107,8 @@ type RepositoryAddCollaboratorOptions struct {
Permission string `json:"permission,omitempty"`
}
-// AddCollaborator adds the specified GitHub user as collaborator to the given repo.
+// AddCollaborator sends an invitation to the specified GitHub user
+// to become a collaborator to the given repo.
//
// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#add-user-as-a-collaborator
func (s *RepositoriesService) AddCollaborator(ctx context.Context, owner, repo, user string, opt *RepositoryAddCollaboratorOptions) (*Response, error) {
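A hedged sketch of the new affiliation filter, reusing the ctx, client, and imports from the topics sketch above; "example-org"/"example-repo" are placeholders and GetLogin is the generated accessor on *github.User.

```go
// listOutsideCollaborators prints the outside collaborators of a hypothetical
// organization-owned repository, fetching up to 100 per page.
func listOutsideCollaborators(ctx context.Context, client *github.Client) error {
	opt := &github.ListCollaboratorsOptions{
		Affiliation: "outside",
		ListOptions: github.ListOptions{PerPage: 100},
	}
	users, _, err := client.Repositories.ListCollaborators(ctx, "example-org", "example-repo", opt)
	if err != nil {
		return err
	}
	for _, u := range users {
		fmt.Println(u.GetLogin())
	}
	return nil
}
```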
diff --git a/vendor/github.com/google/go-github/github/repos_commits.go b/vendor/github.com/google/go-github/github/repos_commits.go
index e516f1a..4451b6b 100644
--- a/vendor/github.com/google/go-github/github/repos_commits.go
+++ b/vendor/github.com/google/go-github/github/repos_commits.go
@@ -79,6 +79,12 @@ type CommitsComparison struct {
Commits []RepositoryCommit `json:"commits,omitempty"`
Files []CommitFile `json:"files,omitempty"`
+
+ HTMLURL *string `json:"html_url,omitempty"`
+ PermalinkURL *string `json:"permalink_url,omitempty"`
+ DiffURL *string `json:"diff_url,omitempty"`
+ PatchURL *string `json:"patch_url,omitempty"`
+ URL *string `json:"url,omitempty"` // API URL.
}
func (c CommitsComparison) String() string {
@@ -121,6 +127,9 @@ func (s *RepositoriesService) ListCommits(ctx context.Context, owner, repo strin
return nil, nil, err
}
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeGitSigningPreview)
+
var commits []*RepositoryCommit
resp, err := s.client.Do(ctx, req, &commits)
if err != nil {
diff --git a/vendor/github.com/google/go-github/github/repos_community_health.go b/vendor/github.com/google/go-github/github/repos_community_health.go
new file mode 100644
index 0000000..b5c75d6
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/repos_community_health.go
@@ -0,0 +1,57 @@
+// Copyright 2017 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "context"
+ "fmt"
+ "time"
+)
+
+// Metric represents the different fields for one file in community health files.
+type Metric struct {
+ Name *string `json:"name"`
+ Key *string `json:"key"`
+ URL *string `json:"url"`
+ HTMLURL *string `json:"html_url"`
+}
+
+// CommunityHealthFiles represents the different files in the community health metrics response.
+type CommunityHealthFiles struct {
+ CodeOfConduct *Metric `json:"code_of_conduct"`
+ Contributing *Metric `json:"contributing"`
+ License *Metric `json:"license"`
+ Readme *Metric `json:"readme"`
+}
+
+// CommunityHealthMetrics represents a response containing the community metrics of a repository.
+type CommunityHealthMetrics struct {
+ HealthPercentage *int `json:"health_percentage"`
+ Files *CommunityHealthFiles `json:"files"`
+ UpdatedAt *time.Time `json:"updated_at"`
+}
+
+// GetCommunityHealthMetrics retrieves all the community health metrics for a repository.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/community/#retrieve-community-health-metrics
+func (s *RepositoriesService) GetCommunityHealthMetrics(ctx context.Context, owner, repo string) (*CommunityHealthMetrics, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/community/profile", owner, repo)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeRepositoryCommunityHealthMetricsPreview)
+
+ metrics := &CommunityHealthMetrics{}
+ resp, err := s.client.Do(ctx, req, metrics)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return metrics, resp, nil
+}
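A minimal sketch of calling the new community profile endpoint; ctx/client are as in the earlier sketch, the repository names are placeholders, and the nil checks reflect that every field in the response types is a pointer.

```go
// printCommunityHealth prints the health score and the code-of-conduct link,
// if present, for a hypothetical repository.
func printCommunityHealth(ctx context.Context, client *github.Client) error {
	metrics, _, err := client.Repositories.GetCommunityHealthMetrics(ctx, "example-owner", "example-repo")
	if err != nil {
		return err
	}
	if metrics.HealthPercentage != nil {
		fmt.Printf("health: %d%%\n", *metrics.HealthPercentage)
	}
	if metrics.Files != nil && metrics.Files.CodeOfConduct != nil && metrics.Files.CodeOfConduct.HTMLURL != nil {
		fmt.Println("code of conduct:", *metrics.Files.CodeOfConduct.HTMLURL)
	}
	return nil
}
```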
diff --git a/vendor/github.com/google/go-github/github/repos_contents.go b/vendor/github.com/google/go-github/github/repos_contents.go
index dfcbe33..ffb56b9 100644
--- a/vendor/github.com/google/go-github/github/repos_contents.go
+++ b/vendor/github.com/google/go-github/github/repos_contents.go
@@ -164,7 +164,7 @@ func (s *RepositoriesService) GetContents(ctx context.Context, owner, repo, path
if directoryUnmarshalError == nil {
return nil, directoryContent, resp, nil
}
- return nil, nil, resp, fmt.Errorf("unmarshalling failed for both file and directory content: %s and %s ", fileUnmarshalError, directoryUnmarshalError)
+ return nil, nil, resp, fmt.Errorf("unmarshalling failed for both file and directory content: %s and %s", fileUnmarshalError, directoryUnmarshalError)
}
// CreateFile creates a new file in a repository at the given path and returns
@@ -248,7 +248,7 @@ func (s *RepositoriesService) GetArchiveLink(ctx context.Context, owner, repo st
}
var resp *http.Response
// Use http.DefaultTransport if no custom Transport is configured
- ctx, req = withContext(ctx, req)
+ req = withContext(ctx, req)
if s.client.client.Transport == nil {
resp, err = http.DefaultTransport.RoundTrip(req)
} else {
diff --git a/vendor/github.com/google/go-github/github/repos_deployments.go b/vendor/github.com/google/go-github/github/repos_deployments.go
index 9054ca9..ec0e842 100644
--- a/vendor/github.com/google/go-github/github/repos_deployments.go
+++ b/vendor/github.com/google/go-github/github/repos_deployments.go
@@ -23,7 +23,7 @@ type Deployment struct {
Description *string `json:"description,omitempty"`
Creator *User `json:"creator,omitempty"`
CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"pushed_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
StatusesURL *string `json:"statuses_url,omitempty"`
RepositoryURL *string `json:"repository_url,omitempty"`
}
@@ -137,7 +137,7 @@ type DeploymentStatus struct {
Description *string `json:"description,omitempty"`
TargetURL *string `json:"target_url,omitempty"`
CreatedAt *Timestamp `json:"created_at,omitempty"`
- UpdatedAt *Timestamp `json:"pushed_at,omitempty"`
+ UpdatedAt *Timestamp `json:"updated_at,omitempty"`
DeploymentURL *string `json:"deployment_url,omitempty"`
RepositoryURL *string `json:"repository_url,omitempty"`
}
diff --git a/vendor/github.com/google/go-github/github/repos_forks.go b/vendor/github.com/google/go-github/github/repos_forks.go
index 6b5e4ea..4ca19a4 100644
--- a/vendor/github.com/google/go-github/github/repos_forks.go
+++ b/vendor/github.com/google/go-github/github/repos_forks.go
@@ -35,6 +35,9 @@ func (s *RepositoriesService) ListForks(ctx context.Context, owner, repo string,
return nil, nil, err
}
+ // TODO: remove custom Accept header when topics API fully launches.
+ req.Header.Set("Accept", mediaTypeTopicsPreview)
+
var repos []*Repository
resp, err := s.client.Do(ctx, req, &repos)
if err != nil {
diff --git a/vendor/github.com/google/go-github/github/repos_invitations.go b/vendor/github.com/google/go-github/github/repos_invitations.go
index a803a12..0a2b7c1 100644
--- a/vendor/github.com/google/go-github/github/repos_invitations.go
+++ b/vendor/github.com/google/go-github/github/repos_invitations.go
@@ -90,5 +90,9 @@ func (s *RepositoriesService) UpdateInvitation(ctx context.Context, owner, repo
invite := &RepositoryInvitation{}
resp, err := s.client.Do(ctx, req, invite)
- return invite, resp, err
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return invite, resp, nil
}
diff --git a/vendor/github.com/google/go-github/github/repos_pages.go b/vendor/github.com/google/go-github/github/repos_pages.go
index 3d19b43..075f574 100644
--- a/vendor/github.com/google/go-github/github/repos_pages.go
+++ b/vendor/github.com/google/go-github/github/repos_pages.go
@@ -61,8 +61,13 @@ func (s *RepositoriesService) GetPagesInfo(ctx context.Context, owner, repo stri
// ListPagesBuilds lists the builds for a GitHub Pages site.
//
// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-pages-builds
-func (s *RepositoriesService) ListPagesBuilds(ctx context.Context, owner, repo string) ([]*PagesBuild, *Response, error) {
+func (s *RepositoriesService) ListPagesBuilds(ctx context.Context, owner, repo string, opt *ListOptions) ([]*PagesBuild, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
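Since ListPagesBuilds now accepts *ListOptions, it can be paginated like the other list calls. A sketch under the same assumptions as the earlier go-github examples (placeholder repository, ctx/client reused):

```go
// listAllPagesBuilds walks every page of Pages builds for a hypothetical repository.
func listAllPagesBuilds(ctx context.Context, client *github.Client) ([]*github.PagesBuild, error) {
	opt := &github.ListOptions{PerPage: 50}
	var all []*github.PagesBuild
	for {
		builds, resp, err := client.Repositories.ListPagesBuilds(ctx, "example-owner", "example-repo", opt)
		if err != nil {
			return nil, err
		}
		all = append(all, builds...)
		if resp.NextPage == 0 {
			break // no more pages
		}
		opt.Page = resp.NextPage
	}
	return all, nil
}
```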
diff --git a/vendor/github.com/google/go-github/github/repos_releases.go b/vendor/github.com/google/go-github/github/repos_releases.go
index 5c27565..1ee7ecf 100644
--- a/vendor/github.com/google/go-github/github/repos_releases.go
+++ b/vendor/github.com/google/go-github/github/repos_releases.go
@@ -244,7 +244,7 @@ func (s *RepositoriesService) DownloadReleaseAsset(ctx context.Context, owner, r
}
defer func() { s.client.client.CheckRedirect = saveRedirect }()
- ctx, req = withContext(ctx, req)
+ req = withContext(ctx, req)
resp, err := s.client.client.Do(req)
if err != nil {
if !strings.Contains(err.Error(), "disable redirect") {
diff --git a/vendor/github.com/google/go-github/github/search.go b/vendor/github.com/google/go-github/github/search.go
index 7668b8b..a597352 100644
--- a/vendor/github.com/google/go-github/github/search.go
+++ b/vendor/github.com/google/go-github/github/search.go
@@ -15,6 +15,14 @@ import (
// SearchService provides access to the search related functions
// in the GitHub API.
//
+// Each method takes a query string defining the search keywords and any search qualifiers.
+// For example, when searching issues, the query "gopher is:issue language:go" will search
+// for issues containing the word "gopher" in Go repositories. The method call
+// opts := &github.SearchOptions{Sort: "created", Order: "asc"}
+// cl.Search.Issues(ctx, "gopher is:issue language:go", opts)
+// will search for such issues, sorting by creation date in ascending order
+// (i.e., oldest first).
+//
// GitHub API docs: https://developer.github.com/v3/search/
type SearchService service
@@ -188,6 +196,10 @@ func (s *SearchService) search(ctx context.Context, searchType string, query str
// Accept header for search commits preview endpoint
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeCommitSearchPreview)
+ case searchType == "repositories":
+ // Accept header for search repositories based on topics preview endpoint
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeTopicsPreview)
case opt != nil && opt.TextMatch:
// Accept header defaults to "application/vnd.github.v3+json"
// We change it here to fetch back text-match metadata
diff --git a/vendor/github.com/google/go-github/github/users.go b/vendor/github.com/google/go-github/github/users.go
index d74439c..83cfb45 100644
--- a/vendor/github.com/google/go-github/github/users.go
+++ b/vendor/github.com/google/go-github/github/users.go
@@ -172,8 +172,13 @@ func (s *UsersService) ListAll(ctx context.Context, opt *UserListOptions) ([]*Us
// authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/repos/invitations/#list-a-users-repository-invitations
-func (s *UsersService) ListInvitations(ctx context.Context) ([]*RepositoryInvitation, *Response, error) {
- req, err := s.client.NewRequest("GET", "user/repository_invitations", nil)
+func (s *UsersService) ListInvitations(ctx context.Context, opt *ListOptions) ([]*RepositoryInvitation, *Response, error) {
+ u, err := addOptions("user/repository_invitations", opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
diff --git a/vendor/github.com/google/go-github/github/users_gpg_keys.go b/vendor/github.com/google/go-github/github/users_gpg_keys.go
index be88c04..3e95fb4 100644
--- a/vendor/github.com/google/go-github/github/users_gpg_keys.go
+++ b/vendor/github.com/google/go-github/github/users_gpg_keys.go
@@ -43,7 +43,7 @@ type GPGEmail struct {
// ListGPGKeys lists the public GPG keys for a user. Passing the empty
// string will fetch keys for the authenticated user. It requires authentication
// via Basic Auth or via OAuth with at least read:gpg_key scope.
-
+//
// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#list-gpg-keys-for-a-user
func (s *UsersService) ListGPGKeys(ctx context.Context, user string, opt *ListOptions) ([]*GPGKey, *Response, error) {
var u string
diff --git a/vendor/github.com/google/go-github/github/without_appengine.go b/vendor/github.com/google/go-github/github/without_appengine.go
index b0edc04..6f8fdac 100644
--- a/vendor/github.com/google/go-github/github/without_appengine.go
+++ b/vendor/github.com/google/go-github/github/without_appengine.go
@@ -14,6 +14,6 @@ import (
"net/http"
)
-func withContext(ctx context.Context, req *http.Request) (context.Context, *http.Request) {
- return ctx, req.WithContext(ctx)
+func withContext(ctx context.Context, req *http.Request) *http.Request {
+ return req.WithContext(ctx)
}
diff --git a/vendor/github.com/googleapis/gax-go/call_option.go b/vendor/github.com/googleapis/gax-go/call_option.go
index 536cb8c..7b62164 100644
--- a/vendor/github.com/googleapis/gax-go/call_option.go
+++ b/vendor/github.com/googleapis/gax-go/call_option.go
@@ -35,6 +35,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
)
// CallOption is an option used by Invoke to control behaviors of RPC calls.
@@ -80,7 +81,11 @@ type boRetryer struct {
}
func (r *boRetryer) Retry(err error) (time.Duration, bool) {
- c := grpc.Code(err)
+ st, ok := status.FromError(err)
+ if !ok {
+ return 0, false
+ }
+ c := st.Code()
for _, rc := range r.codes {
if c == rc {
return r.backoff.Pause(), true
@@ -121,6 +126,9 @@ func (bo *Backoff) Pause() time.Duration {
if bo.Multiplier < 1 {
bo.Multiplier = 2
}
+ // Select a duration between zero and the current max. It might seem counterintuitive to
+ // have so much jitter, but https://www.awsarchitectureblog.com/2015/03/backoff.html
+ // argues that that is the best strategy.
d := time.Duration(rand.Int63n(int64(bo.cur)))
bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
if bo.cur > bo.Max {
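For context, a hedged sketch of how the retry pieces above are typically wired together through gax.Invoke; the closure body is a placeholder rather than a real RPC, and the chosen backoff values are arbitrary.

```go
package example

import (
	"context"
	"time"

	gax "github.com/googleapis/gax-go"
	"google.golang.org/grpc/codes"
)

func callWithRetry(ctx context.Context) error {
	retry := gax.WithRetry(func() gax.Retryer {
		// Retry only UNAVAILABLE responses, using the jittered exponential
		// backoff implemented by Backoff.Pause above.
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        30 * time.Second,
			Multiplier: 2,
		})
	})

	return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
		// Placeholder for the real gRPC call; its returned status code is what
		// boRetryer.Retry inspects via status.FromError.
		return nil
	}, retry)
}
```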
diff --git a/vendor/github.com/googleapis/gax-go/path_template.go b/vendor/github.com/googleapis/gax-go/path_template.go
deleted file mode 100644
index 41bda94..0000000
--- a/vendor/github.com/googleapis/gax-go/path_template.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2016, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package gax
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-type matcher interface {
- match([]string) (int, error)
- String() string
-}
-
-type segment struct {
- matcher
- name string
-}
-
-type labelMatcher string
-
-func (ls labelMatcher) match(segments []string) (int, error) {
- if len(segments) == 0 {
- return 0, fmt.Errorf("expected %s but no more segments found", ls)
- }
- if segments[0] != string(ls) {
- return 0, fmt.Errorf("expected %s but got %s", ls, segments[0])
- }
- return 1, nil
-}
-
-func (ls labelMatcher) String() string {
- return string(ls)
-}
-
-type wildcardMatcher int
-
-func (wm wildcardMatcher) match(segments []string) (int, error) {
- if len(segments) == 0 {
- return 0, errors.New("no more segments found")
- }
- return 1, nil
-}
-
-func (wm wildcardMatcher) String() string {
- return "*"
-}
-
-type pathWildcardMatcher int
-
-func (pwm pathWildcardMatcher) match(segments []string) (int, error) {
- length := len(segments) - int(pwm)
- if length <= 0 {
- return 0, errors.New("not sufficient segments are supplied for path wildcard")
- }
- return length, nil
-}
-
-func (pwm pathWildcardMatcher) String() string {
- return "**"
-}
-
-type ParseError struct {
- Pos int
- Template string
- Message string
-}
-
-func (pe ParseError) Error() string {
- return fmt.Sprintf("at %d of template '%s', %s", pe.Pos, pe.Template, pe.Message)
-}
-
-// PathTemplate manages the template to build and match with paths used
-// by API services. It holds a template and variable names in it, and
-// it can extract matched patterns from a path string or build a path
-// string from a binding.
-//
-// See http.proto in github.com/googleapis/googleapis/ for the details of
-// the template syntax.
-type PathTemplate struct {
- segments []segment
-}
-
-// NewPathTemplate parses a path template, and returns a PathTemplate
-// instance if successful.
-func NewPathTemplate(template string) (*PathTemplate, error) {
- return parsePathTemplate(template)
-}
-
-// MustCompilePathTemplate is like NewPathTemplate but panics if the
-// expression cannot be parsed. It simplifies safe initialization of
-// global variables holding compiled regular expressions.
-func MustCompilePathTemplate(template string) *PathTemplate {
- pt, err := NewPathTemplate(template)
- if err != nil {
- panic(err)
- }
- return pt
-}
-
-// Match attempts to match the given path with the template, and returns
-// the mapping of the variable name to the matched pattern string.
-func (pt *PathTemplate) Match(path string) (map[string]string, error) {
- paths := strings.Split(path, "/")
- values := map[string]string{}
- for _, segment := range pt.segments {
- length, err := segment.match(paths)
- if err != nil {
- return nil, err
- }
- if segment.name != "" {
- value := strings.Join(paths[:length], "/")
- if oldValue, ok := values[segment.name]; ok {
- values[segment.name] = oldValue + "/" + value
- } else {
- values[segment.name] = value
- }
- }
- paths = paths[length:]
- }
- if len(paths) != 0 {
- return nil, fmt.Errorf("Trailing path %s remains after the matching", strings.Join(paths, "/"))
- }
- return values, nil
-}
-
-// Render creates a path string from its template and the binding from
-// the variable name to the value.
-func (pt *PathTemplate) Render(binding map[string]string) (string, error) {
- result := make([]string, 0, len(pt.segments))
- var lastVariableName string
- for _, segment := range pt.segments {
- name := segment.name
- if lastVariableName != "" && name == lastVariableName {
- continue
- }
- lastVariableName = name
- if name == "" {
- result = append(result, segment.String())
- } else if value, ok := binding[name]; ok {
- result = append(result, value)
- } else {
- return "", fmt.Errorf("%s is not found", name)
- }
- }
- built := strings.Join(result, "/")
- return built, nil
-}
diff --git a/vendor/github.com/googleapis/gax-go/path_template_parser.go b/vendor/github.com/googleapis/gax-go/path_template_parser.go
deleted file mode 100644
index 79c8e75..0000000
--- a/vendor/github.com/googleapis/gax-go/path_template_parser.go
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright 2016, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-package gax
-
-import (
- "fmt"
- "io"
- "strings"
-)
-
-// This parser follows the syntax of path templates, from
-// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto.
-// The differences are that there is no custom verb, we allow the initial slash
-// to be absent, and that we are not strict as
-// https://tools.ietf.org/html/rfc6570 about the characters in identifiers and
-// literals.
-
-type pathTemplateParser struct {
- r *strings.Reader
- runeCount int // the number of the current rune in the original string
- nextVar int // the number to use for the next unnamed variable
- seenName map[string]bool // names we've seen already
- seenPathWildcard bool // have we seen "**" already?
-}
-
-func parsePathTemplate(template string) (pt *PathTemplate, err error) {
- p := &pathTemplateParser{
- r: strings.NewReader(template),
- seenName: map[string]bool{},
- }
-
- // Handle panics with strings like errors.
- // See pathTemplateParser.error, below.
- defer func() {
- if x := recover(); x != nil {
- errmsg, ok := x.(errString)
- if !ok {
- panic(x)
- }
- pt = nil
- err = ParseError{p.runeCount, template, string(errmsg)}
- }
- }()
-
- segs := p.template()
- // If there is a path wildcard, set its length. We can't do this
- // until we know how many segments we've got all together.
- for i, seg := range segs {
- if _, ok := seg.matcher.(pathWildcardMatcher); ok {
- segs[i].matcher = pathWildcardMatcher(len(segs) - i - 1)
- break
- }
- }
- return &PathTemplate{segments: segs}, nil
-
-}
-
-// Used to indicate errors "thrown" by this parser. We don't use string because
-// many parts of the standard library panic with strings.
-type errString string
-
-// Terminates parsing immediately with an error.
-func (p *pathTemplateParser) error(msg string) {
- panic(errString(msg))
-}
-
-// Template = [ "/" ] Segments
-func (p *pathTemplateParser) template() []segment {
- var segs []segment
- if p.consume('/') {
- // Initial '/' needs an initial empty matcher.
- segs = append(segs, segment{matcher: labelMatcher("")})
- }
- return append(segs, p.segments("")...)
-}
-
-// Segments = Segment { "/" Segment }
-func (p *pathTemplateParser) segments(name string) []segment {
- var segs []segment
- for {
- subsegs := p.segment(name)
- segs = append(segs, subsegs...)
- if !p.consume('/') {
- break
- }
- }
- return segs
-}
-
-// Segment = "*" | "**" | LITERAL | Variable
-func (p *pathTemplateParser) segment(name string) []segment {
- if p.consume('*') {
- if name == "" {
- name = fmt.Sprintf("$%d", p.nextVar)
- p.nextVar++
- }
- if p.consume('*') {
- if p.seenPathWildcard {
- p.error("multiple '**' disallowed")
- }
- p.seenPathWildcard = true
- // We'll change 0 to the right number at the end.
- return []segment{{name: name, matcher: pathWildcardMatcher(0)}}
- }
- return []segment{{name: name, matcher: wildcardMatcher(0)}}
- }
- if p.consume('{') {
- if name != "" {
- p.error("recursive named bindings are not allowed")
- }
- return p.variable()
- }
- return []segment{{name: name, matcher: labelMatcher(p.literal())}}
-}
-
-// Variable = "{" FieldPath [ "=" Segments ] "}"
-// "{" is already consumed.
-func (p *pathTemplateParser) variable() []segment {
- // Simplification: treat FieldPath as LITERAL, instead of IDENT { '.' IDENT }
- name := p.literal()
- if p.seenName[name] {
- p.error(name + " appears multiple times")
- }
- p.seenName[name] = true
- var segs []segment
- if p.consume('=') {
- segs = p.segments(name)
- } else {
- // "{var}" is equivalent to "{var=*}"
- segs = []segment{{name: name, matcher: wildcardMatcher(0)}}
- }
- if !p.consume('}') {
- p.error("expected '}'")
- }
- return segs
-}
-
-// A literal is any sequence of characters other than a few special ones.
-// The list of stop characters is not quite the same as in the template RFC.
-func (p *pathTemplateParser) literal() string {
- lit := p.consumeUntil("/*}{=")
- if lit == "" {
- p.error("empty literal")
- }
- return lit
-}
-
-// Read runes until EOF or one of the runes in stopRunes is encountered.
-// If the latter, unread the stop rune. Return the accumulated runes as a string.
-func (p *pathTemplateParser) consumeUntil(stopRunes string) string {
- var runes []rune
- for {
- r, ok := p.readRune()
- if !ok {
- break
- }
- if strings.IndexRune(stopRunes, r) >= 0 {
- p.unreadRune()
- break
- }
- runes = append(runes, r)
- }
- return string(runes)
-}
-
-// If the next rune is r, consume it and return true.
-// Otherwise, leave the input unchanged and return false.
-func (p *pathTemplateParser) consume(r rune) bool {
- rr, ok := p.readRune()
- if !ok {
- return false
- }
- if r == rr {
- return true
- }
- p.unreadRune()
- return false
-}
-
-// Read the next rune from the input. Return it.
-// The second return value is false at EOF.
-func (p *pathTemplateParser) readRune() (rune, bool) {
- r, _, err := p.r.ReadRune()
- if err == io.EOF {
- return r, false
- }
- if err != nil {
- p.error(err.Error())
- }
- p.runeCount++
- return r, true
-}
-
-// Put the last rune that was read back on the input.
-func (p *pathTemplateParser) unreadRune() {
- if err := p.r.UnreadRune(); err != nil {
- p.error(err.Error())
- }
- p.runeCount--
-}
diff --git a/vendor/github.com/gorilla/csrf/Gopkg.lock b/vendor/github.com/gorilla/csrf/Gopkg.lock
new file mode 100644
index 0000000..5db9540
--- /dev/null
+++ b/vendor/github.com/gorilla/csrf/Gopkg.lock
@@ -0,0 +1,27 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ name = "github.com/gorilla/context"
+ packages = ["."]
+ revision = "1ea25387ff6f684839d82767c1733ff4d4d15d0a"
+ version = "v1.1"
+
+[[projects]]
+ name = "github.com/gorilla/securecookie"
+ packages = ["."]
+ revision = "667fe4e3466a040b780561fe9b51a83a3753eefc"
+ version = "v1.1"
+
+[[projects]]
+ name = "github.com/pkg/errors"
+ packages = ["."]
+ revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
+ version = "v0.8.0"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ inputs-digest = "1695686bc8fa0eb76df9fe8c5ca473686071ddcf795a0595a9465a03e8ac9bef"
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/vendor/github.com/gorilla/csrf/Gopkg.toml b/vendor/github.com/gorilla/csrf/Gopkg.toml
new file mode 100644
index 0000000..02837b1
--- /dev/null
+++ b/vendor/github.com/gorilla/csrf/Gopkg.toml
@@ -0,0 +1,34 @@
+
+# Gopkg.toml example
+#
+# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+
+
+[[constraint]]
+ name = "github.com/gorilla/context"
+ version = "1.1.0"
+
+[[constraint]]
+ name = "github.com/gorilla/securecookie"
+ version = "1.1.0"
+
+[[constraint]]
+ name = "github.com/pkg/errors"
+ version = "0.8.0"
diff --git a/vendor/github.com/gorilla/csrf/doc.go b/vendor/github.com/gorilla/csrf/doc.go
index 301abe0..3046cdc 100644
--- a/vendor/github.com/gorilla/csrf/doc.go
+++ b/vendor/github.com/gorilla/csrf/doc.go
@@ -4,13 +4,15 @@ prevention middleware for Go web applications & services.
It includes:
- * The `csrf.Protect` middleware/handler provides CSRF protection on routes
- attached to a router or a sub-router.
- * A `csrf.Token` function that provides the token to pass into your response,
- whether that be a HTML form or a JSON response body.
- * ... and a `csrf.TemplateField` helper that you can pass into your `html/template`
- templates to replace a `{{ .csrfField }}` template tag with a hidden input
- field.
+* The `csrf.Protect` middleware/handler provides CSRF protection on routes
+attached to a router or a sub-router.
+
+* A `csrf.Token` function that provides the token to pass into your response,
+whether that be an HTML form or a JSON response body.
+
+* ... and a `csrf.TemplateField` helper that you can pass into your `html/template`
+templates to replace a `{{ .csrfField }}` template tag with a hidden input
+field.
gorilla/csrf is easy to use: add the middleware to individual handlers with
the below:
@@ -31,66 +33,66 @@ validation.
Here's the common use-case: HTML forms you want to provide CSRF protection for,
in order to protect against malicious POST requests being made:
- package main
-
- import (
- "fmt"
- "html/template"
- "net/http"
-
- "github.com/gorilla/csrf"
- "github.com/gorilla/mux"
- )
-
- var form = `
- <html>
- <head>
- <title>Sign Up!</title>
- </head>
- <body>
- <form method="POST" action="/signup/post" accept-charset="UTF-8">
- <input type="text" name="name">
- <input type="text" name="email">
- <!--
- The default template tag used by the CSRF middleware .
- This will be replaced with a hidden <input> field containing the
- masked CSRF token.
- -->
- {{ .csrfField }}
- <input type="submit" value="Sign up!">
- </form>
- </body>
- </html>
- `
-
- var t = template.Must(template.New("signup_form.tmpl").Parse(form))
-
- func main() {
- r := mux.NewRouter()
- r.HandleFunc("/signup", ShowSignupForm)
- // All POST requests without a valid token will return HTTP 403 Forbidden.
- r.HandleFunc("/signup/post", SubmitSignupForm)
-
- // Add the middleware to your router by wrapping it.
- http.ListenAndServe(":8000",
- csrf.Protect([]byte("32-byte-long-auth-key"))(r))
- // PS: Don't forget to pass csrf.Secure(false) if you're developing locally
- // over plain HTTP (just don't leave it on in production).
- }
-
- func ShowSignupForm(w http.ResponseWriter, r *http.Request) {
- // signup_form.tmpl just needs a {{ .csrfField }} template tag for
- // csrf.TemplateField to inject the CSRF token into. Easy!
- t.ExecuteTemplate(w, "signup_form.tmpl", map[string]interface{}{
- csrf.TemplateTag: csrf.TemplateField(r),
- })
- }
-
- func SubmitSignupForm(w http.ResponseWriter, r *http.Request) {
- // We can trust that requests making it this far have satisfied
- // our CSRF protection requirements.
- fmt.Fprintf(w, "%v\n", r.PostForm)
- }
+ package main
+
+ import (
+ "fmt"
+ "html/template"
+ "net/http"
+
+ "github.com/gorilla/csrf"
+ "github.com/gorilla/mux"
+ )
+
+ var form = `
+ <html>
+ <head>
+ <title>Sign Up!</title>
+ </head>
+ <body>
+ <form method="POST" action="/signup/post" accept-charset="UTF-8">
+ <input type="text" name="name">
+ <input type="text" name="email">
+ <!--
+ The default template tag used by the CSRF middleware.
+ This will be replaced with a hidden <input> field containing the
+ masked CSRF token.
+ -->
+ {{ .csrfField }}
+ <input type="submit" value="Sign up!">
+ </form>
+ </body>
+ </html>
+ `
+
+ var t = template.Must(template.New("signup_form.tmpl").Parse(form))
+
+ func main() {
+ r := mux.NewRouter()
+ r.HandleFunc("/signup", ShowSignupForm)
+ // All POST requests without a valid token will return HTTP 403 Forbidden.
+ r.HandleFunc("/signup/post", SubmitSignupForm)
+
+ // Add the middleware to your router by wrapping it.
+ http.ListenAndServe(":8000",
+ csrf.Protect([]byte("32-byte-long-auth-key"))(r))
+ // PS: Don't forget to pass csrf.Secure(false) if you're developing locally
+ // over plain HTTP (just don't leave it on in production).
+ }
+
+ func ShowSignupForm(w http.ResponseWriter, r *http.Request) {
+ // signup_form.tmpl just needs a {{ .csrfField }} template tag for
+ // csrf.TemplateField to inject the CSRF token into. Easy!
+ t.ExecuteTemplate(w, "signup_form.tmpl", map[string]interface{}{
+ csrf.TemplateTag: csrf.TemplateField(r),
+ })
+ }
+
+ func SubmitSignupForm(w http.ResponseWriter, r *http.Request) {
+ // We can trust that requests making it this far have satisfied
+ // our CSRF protection requirements.
+ fmt.Fprintf(w, "%v\n", r.PostForm)
+ }
Note that the CSRF middleware will (by necessity) consume the request body if the
token is passed via POST form values. If you need to consume this in your
@@ -101,39 +103,39 @@ You can also send the CSRF token in the response header. This approach is useful
if you're using a front-end JavaScript framework like Ember or Angular, or are
providing a JSON API:
- package main
+ package main
- import (
- "github.com/gorilla/csrf"
- "github.com/gorilla/mux"
- )
+ import (
+ "github.com/gorilla/csrf"
+ "github.com/gorilla/mux"
+ )
- func main() {
- r := mux.NewRouter()
+ func main() {
+ r := mux.NewRouter()
- api := r.PathPrefix("/api").Subrouter()
- api.HandleFunc("/user/:id", GetUser).Methods("GET")
+ api := r.PathPrefix("/api").Subrouter()
+ api.HandleFunc("/user/:id", GetUser).Methods("GET")
- http.ListenAndServe(":8000",
- csrf.Protect([]byte("32-byte-long-auth-key"))(r))
- }
+ http.ListenAndServe(":8000",
+ csrf.Protect([]byte("32-byte-long-auth-key"))(r))
+ }
- func GetUser(w http.ResponseWriter, r *http.Request) {
- // Authenticate the request, get the id from the route params,
- // and fetch the user from the DB, etc.
+ func GetUser(w http.ResponseWriter, r *http.Request) {
+ // Authenticate the request, get the id from the route params,
+ // and fetch the user from the DB, etc.
- // Get the token and pass it in the CSRF header. Our JSON-speaking client
- // or JavaScript framework can now read the header and return the token in
- // in its own "X-CSRF-Token" request header on the subsequent POST.
- w.Header().Set("X-CSRF-Token", csrf.Token(r))
- b, err := json.Marshal(user)
- if err != nil {
- http.Error(w, err.Error(), 500)
- return
- }
+ // Get the token and pass it in the CSRF header. Our JSON-speaking client
+ // or JavaScript framework can now read the header and return the token in
+ // its own "X-CSRF-Token" request header on the subsequent POST.
+ w.Header().Set("X-CSRF-Token", csrf.Token(r))
+ b, err := json.Marshal(user)
+ if err != nil {
+ http.Error(w, err.Error(), 500)
+ return
+ }
- w.Write(b)
- }
+ w.Write(b)
+ }
If you're writing a client that's supposed to mimic browser behavior, make sure to
send back the CSRF cookie (the default name is _gorilla_csrf, but this can be changed
@@ -141,23 +143,29 @@ with the CookieName Option) along with either the X-CSRF-Token header or the gor
In addition: getting CSRF protection right is important, so here's some background:
- * This library generates unique-per-request (masked) tokens as a mitigation
- against the [BREACH attack](http://breachattack.com/).
- * The 'base' (unmasked) token is stored in the session, which means that
- multiple browser tabs won't cause a user problems as their per-request token
- is compared with the base token.
- * Operates on a "whitelist only" approach where safe (non-mutating) HTTP methods
- (GET, HEAD, OPTIONS, TRACE) are the *only* methods where token validation is not
- enforced.
- * The design is based on the battle-tested
- [Django](https://docs.djangoproject.com/en/1.8/ref/csrf/) and [Ruby on
- Rails](http://api.rubyonrails.org/classes/ActionController/RequestForgeryProtection.html)
- approaches.
- * Cookies are authenticated and based on the [securecookie](https://github.com/gorilla/securecookie)
- library. They're also Secure (issued over HTTPS only) and are HttpOnly
- by default, because sane defaults are important.
- * Go's `crypto/rand` library is used to generate the 32 byte (256 bit) tokens
- and the one-time-pad used for masking them.
+* This library generates unique-per-request (masked) tokens as a mitigation
+against the BREACH attack (http://breachattack.com/).
+
+* The 'base' (unmasked) token is stored in the session, which means that
+multiple browser tabs won't cause a user problems as their per-request token
+is compared with the base token.
+
+* Operates on a "whitelist only" approach where safe (non-mutating) HTTP methods
+(GET, HEAD, OPTIONS, TRACE) are the *only* methods where token validation is not
+enforced.
+
+* The design is based on the battle-tested Django
+(https://docs.djangoproject.com/en/1.8/ref/csrf/) and Ruby on Rails
+(http://api.rubyonrails.org/classes/ActionController/RequestForgeryProtection.html)
+approaches.
+
+* Cookies are authenticated and based on the securecookie
+(https://github.com/gorilla/securecookie) library. They're also Secure (issued
+over HTTPS only) and are HttpOnly by default, because sane defaults are
+important.
+
+* Go's `crypto/rand` library is used to generate the 32 byte (256 bit) tokens
+and the one-time-pad used for masking them.
This library does not seek to be adventurous.
diff --git a/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/gorilla/handlers/handlers.go
index 551f8ff..75db7f8 100644
--- a/vendor/github.com/gorilla/handlers/handlers.go
+++ b/vendor/github.com/gorilla/handlers/handlers.go
@@ -79,9 +79,9 @@ func (h combinedLoggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Reque
}
func makeLogger(w http.ResponseWriter) loggingResponseWriter {
- var logger loggingResponseWriter = &responseLogger{w: w}
+ var logger loggingResponseWriter = &responseLogger{w: w, status: http.StatusOK}
if _, ok := w.(http.Hijacker); ok {
- logger = &hijackLogger{responseLogger{w: w}}
+ logger = &hijackLogger{responseLogger{w: w, status: http.StatusOK}}
}
h, ok1 := logger.(http.Hijacker)
c, ok2 := w.(http.CloseNotifier)
@@ -114,10 +114,6 @@ func (l *responseLogger) Header() http.Header {
}
func (l *responseLogger) Write(b []byte) (int, error) {
- if l.status == 0 {
- // The status will be StatusOK if WriteHeader has not been called yet
- l.status = http.StatusOK
- }
size, err := l.w.Write(b)
l.size += size
return size, err
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
index cdab878..b096bf6 100644
--- a/vendor/github.com/gorilla/mux/README.md
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -15,7 +15,7 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv
* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
-* URL hosts and paths can have variables with an optional regular expression.
+* URL hosts, paths and query values can have variables with an optional regular expression.
* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
@@ -24,9 +24,9 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv
* [Install](#install)
* [Examples](#examples)
* [Matching Routes](#matching-routes)
-* [Listing Routes](#listing-routes)
* [Static Files](#static-files)
* [Registered URLs](#registered-urls)
+* [Walking Routes](#walking-routes)
* [Full Example](#full-example)
---
@@ -135,6 +135,14 @@ r.HandleFunc("/products", ProductsHandler).
Schemes("http")
```
+Routes are tested in the order they were added to the router. If two routes match, the first one wins:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/specific", specificHandler)
+r.PathPrefix("/").Handler(catchAllHandler)
+```
+
Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
@@ -168,7 +176,6 @@ s.HandleFunc("/{key}/", ProductHandler)
// "/products/{key}/details"
s.HandleFunc("/{key}/details", ProductDetailsHandler)
```
-
### Listing Routes
Routes on a mux can be listed using the Router.Walk method—useful for generating documentation:
@@ -179,6 +186,7 @@ package main
import (
"fmt"
"net/http"
+ "strings"
"github.com/gorilla/mux"
)
@@ -190,15 +198,25 @@ func handler(w http.ResponseWriter, r *http.Request) {
func main() {
r := mux.NewRouter()
r.HandleFunc("/", handler)
- r.HandleFunc("/products", handler)
- r.HandleFunc("/articles", handler)
- r.HandleFunc("/articles/{id}", handler)
+ r.HandleFunc("/products", handler).Methods("POST")
+ r.HandleFunc("/articles", handler).Methods("GET")
+ r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
t, err := route.GetPathTemplate()
if err != nil {
return err
}
- fmt.Println(t)
+ // p will contain a regular expression that is compatible with regular expressions in Perl, Python, and other languages.
+ // For example, the regular expression for path '/articles/{id}' will be '^/articles/(?P<v0>[^/]+)$'.
+ p, err := route.GetPathRegexp()
+ if err != nil {
+ return err
+ }
+ m, err := route.GetMethods()
+ if err != nil {
+ return err
+ }
+ fmt.Println(strings.Join(m, ","), t, p)
return nil
})
http.Handle("/", r)
@@ -258,19 +276,21 @@ url, err := r.Get("article").URL("category", "technology", "id", "42")
"/articles/technology/42"
```
-This also works for host variables:
+This also works for host and query value variables:
```go
r := mux.NewRouter()
r.Host("{subdomain}.domain.com").
Path("/articles/{category}/{id:[0-9]+}").
+ Queries("filter", "{filter}").
HandlerFunc(ArticleHandler).
Name("article")
-// url.String() will be "http://news.domain.com/articles/technology/42"
+// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
- "id", "42")
+ "id", "42",
+ "filter", "gorilla")
```
All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
@@ -308,6 +328,37 @@ url, err := r.Get("article").URL("subdomain", "news",
"id", "42")
```
+### Walking Routes
+
+The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
+the following prints all of the registered routes:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+r.HandleFunc("/products", handler).Methods("POST")
+r.HandleFunc("/articles", handler).Methods("GET")
+r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
+r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+ t, err := route.GetPathTemplate()
+ if err != nil {
+ return err
+ }
+ // p will contain a regular expression that is compatible with regular expressions in Perl, Python, and other languages.
+ // For example, the regular expression for path '/articles/{id}' will be '^/articles/(?P<v0>[^/]+)$'.
+ p, err := route.GetPathRegexp()
+ if err != nil {
+ return err
+ }
+ m, err := route.GetMethods()
+ if err != nil {
+ return err
+ }
+ fmt.Println(strings.Join(m, ","), t, p)
+ return nil
+})
+```
+
## Full Example
Here's a complete, runnable example of a small `mux` based server:
diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go
index 00daf4a..cce30b2 100644
--- a/vendor/github.com/gorilla/mux/doc.go
+++ b/vendor/github.com/gorilla/mux/doc.go
@@ -12,8 +12,8 @@ or other conditions. The main features are:
* Requests can be matched based on URL host, path, path prefix, schemes,
header and query values, HTTP methods or using custom matchers.
- * URL hosts and paths can have variables with an optional regular
- expression.
+ * URL hosts, paths and query values can have variables with an optional
+ regular expression.
* Registered URLs can be built, or "reversed", which helps maintaining
references to resources.
* Routes can be used as subrouters: nested routes are only tested if the
@@ -188,18 +188,20 @@ key/value pairs for the route variables. For the previous route, we would do:
"/articles/technology/42"
-This also works for host variables:
+This also works for host and query value variables:
r := mux.NewRouter()
r.Host("{subdomain}.domain.com").
Path("/articles/{category}/{id:[0-9]+}").
+ Queries("filter", "{filter}").
HandlerFunc(ArticleHandler).
Name("article")
- // url.String() will be "http://news.domain.com/articles/technology/42"
+ // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
url, err := r.Get("article").URL("subdomain", "news",
"category", "technology",
- "id", "42")
+ "id", "42",
+ "filter", "gorilla")
All variables defined in the route are required, and their values must
conform to the corresponding patterns. These requirements guarantee that a
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
index d66ec38..fb69196 100644
--- a/vendor/github.com/gorilla/mux/mux.go
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -13,6 +13,10 @@ import (
"strings"
)
+var (
+ ErrMethodMismatch = errors.New("method is not allowed")
+)
+
// NewRouter returns a new router instance.
func NewRouter() *Router {
return &Router{namedRoutes: make(map[string]*Route), KeepContext: false}
@@ -39,6 +43,10 @@ func NewRouter() *Router {
type Router struct {
// Configurable Handler to be used when no route matches.
NotFoundHandler http.Handler
+
+ // Configurable Handler to be used when the request method does not match the route.
+ MethodNotAllowedHandler http.Handler
+
// Parent route, if this is a subrouter.
parent parentRoute
// Routes to be matched, in order.
@@ -65,6 +73,11 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
}
}
+ if match.MatchErr == ErrMethodMismatch && r.MethodNotAllowedHandler != nil {
+ match.Handler = r.MethodNotAllowedHandler
+ return true
+ }
+
// Closest match for a router (includes sub-routers)
if r.NotFoundHandler != nil {
match.Handler = r.NotFoundHandler
@@ -105,9 +118,15 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
req = setVars(req, match.Vars)
req = setCurrentRoute(req, match.Route)
}
+
+ if handler == nil && match.MatchErr == ErrMethodMismatch {
+ handler = methodNotAllowedHandler()
+ }
+
if handler == nil {
handler = http.NotFoundHandler()
}
+
if !r.KeepContext {
defer contextClear(req)
}
@@ -176,6 +195,13 @@ func (r *Router) UseEncodedPath() *Router {
// parentRoute
// ----------------------------------------------------------------------------
+func (r *Router) getBuildScheme() string {
+ if r.parent != nil {
+ return r.parent.getBuildScheme()
+ }
+ return ""
+}
+
// getNamedRoutes returns the map where named routes are registered.
func (r *Router) getNamedRoutes() map[string]*Route {
if r.namedRoutes == nil {
@@ -299,10 +325,6 @@ type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
for _, t := range r.routes {
- if t.regexp == nil || t.regexp.path == nil || t.regexp.path.template == "" {
- continue
- }
-
err := walkFn(t, r, ancestors)
if err == SkipRouter {
continue
@@ -312,10 +334,12 @@ func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
}
for _, sr := range t.matchers {
if h, ok := sr.(*Router); ok {
+ ancestors = append(ancestors, t)
err := h.walk(walkFn, ancestors)
if err != nil {
return err
}
+ ancestors = ancestors[:len(ancestors)-1]
}
}
if h, ok := t.handler.(*Router); ok {
@@ -339,6 +363,11 @@ type RouteMatch struct {
Route *Route
Handler http.Handler
Vars map[string]string
+
+ // MatchErr is set to the appropriate matching error.
+ // It is set to ErrMethodMismatch if there is a mismatch between
+ // the request method and the route method.
+ MatchErr error
}
type contextKey int
@@ -458,7 +487,7 @@ func mapFromPairsToString(pairs ...string) (map[string]string, error) {
return m, nil
}
-// mapFromPairsToRegex converts variadic string paramers to a
+// mapFromPairsToRegex converts variadic string parameters to a
// string to regex map.
func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
length, err := checkPairs(pairs...)
@@ -540,3 +569,12 @@ func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]s
}
return true
}
+
+// methodNotAllowed replies to the request with an HTTP status code 405.
+func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusMethodNotAllowed)
+}
+
+// methodNotAllowedHandler returns a simple request handler
+// that replies to each request with a status code 405.
+func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }
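A small sketch of the new MethodNotAllowedHandler hook added above; the response body and port are arbitrary choices for illustration.

```go
package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	r.HandleFunc("/articles", func(w http.ResponseWriter, req *http.Request) {
		w.Write([]byte("articles\n"))
	}).Methods("GET")

	// A POST to /articles now yields 405 from the built-in methodNotAllowedHandler;
	// setting the field replaces that default with a custom response.
	r.MethodNotAllowedHandler = http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		w.Header().Set("Allow", "GET")
		http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
	})

	http.ListenAndServe(":8000", r)
}
```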
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
index 0189ad3..80d1f78 100644
--- a/vendor/github.com/gorilla/mux/regexp.go
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -35,7 +35,7 @@ func newRouteRegexp(tpl string, matchHost, matchPrefix, matchQuery, strictSlash,
// Now let's parse it.
defaultPattern := "[^/]+"
if matchQuery {
- defaultPattern = "[^?&]*"
+ defaultPattern = ".*"
} else if matchHost {
defaultPattern = "[^.]+"
matchPrefix = false
@@ -178,6 +178,9 @@ func (r *routeRegexp) url(values map[string]string) (string, error) {
if !ok {
return "", fmt.Errorf("mux: missing route variable %q", v)
}
+ if r.matchQuery {
+ value = url.QueryEscape(value)
+ }
urlValues[k] = value
}
rv := fmt.Sprintf(r.reverse, urlValues...)
diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go
index 5544c1f..6863adb 100644
--- a/vendor/github.com/gorilla/mux/route.go
+++ b/vendor/github.com/gorilla/mux/route.go
@@ -31,6 +31,8 @@ type Route struct {
skipClean bool
// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
useEncodedPath bool
+ // The scheme used when building URLs.
+ buildScheme string
// If true, this route never matches: it is only used to build URLs.
buildOnly bool
// The name used to build URLs.
@@ -50,12 +52,27 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
if r.buildOnly || r.err != nil {
return false
}
+
+ var matchErr error
+
// Match everything.
for _, m := range r.matchers {
if matched := m.Match(req, match); !matched {
+ if _, ok := m.(methodMatcher); ok {
+ matchErr = ErrMethodMismatch
+ continue
+ }
+ matchErr = nil
return false
}
}
+
+ if matchErr != nil {
+ match.MatchErr = matchErr
+ return false
+ }
+
+ match.MatchErr = nil
// Yay, we have a match. Let's collect some info about it.
if match.Route == nil {
match.Route = r
@@ -66,6 +83,7 @@ func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
if match.Vars == nil {
match.Vars = make(map[string]string)
}
+
// Set variables.
if r.regexp != nil {
r.regexp.setMatch(req, match, r)
@@ -394,6 +412,9 @@ func (r *Route) Schemes(schemes ...string) *Route {
for k, v := range schemes {
schemes[k] = strings.ToLower(v)
}
+ if r.buildScheme == "" && len(schemes) > 0 {
+ r.buildScheme = schemes[0]
+ }
return r.addMatcher(schemeMatcher(schemes))
}
@@ -477,22 +498,33 @@ func (r *Route) URL(pairs ...string) (*url.URL, error) {
return nil, err
}
var scheme, host, path string
+ queries := make([]string, 0, len(r.regexp.queries))
if r.regexp.host != nil {
- // Set a default scheme.
- scheme = "http"
if host, err = r.regexp.host.url(values); err != nil {
return nil, err
}
+ scheme = "http"
+ if s := r.getBuildScheme(); s != "" {
+ scheme = s
+ }
}
if r.regexp.path != nil {
if path, err = r.regexp.path.url(values); err != nil {
return nil, err
}
}
+ for _, q := range r.regexp.queries {
+ var query string
+ if query, err = q.url(values); err != nil {
+ return nil, err
+ }
+ queries = append(queries, query)
+ }
return &url.URL{
- Scheme: scheme,
- Host: host,
- Path: path,
+ Scheme: scheme,
+ Host: host,
+ Path: path,
+ RawQuery: strings.Join(queries, "&"),
}, nil
}
@@ -514,10 +546,14 @@ func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
if err != nil {
return nil, err
}
- return &url.URL{
+ u := &url.URL{
Scheme: "http",
Host: host,
- }, nil
+ }
+ if s := r.getBuildScheme(); s != "" {
+ u.Scheme = s
+ }
+ return u, nil
}
// URLPath builds the path part of the URL for a route. See Route.URL().
@@ -558,6 +594,36 @@ func (r *Route) GetPathTemplate() (string, error) {
return r.regexp.path.template, nil
}
+// GetPathRegexp returns the expanded regular expression used to match the route path.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathRegexp() (string, error) {
+ if r.err != nil {
+ return "", r.err
+ }
+ if r.regexp == nil || r.regexp.path == nil {
+ return "", errors.New("mux: route does not have a path")
+ }
+ return r.regexp.path.regexp.String(), nil
+}
+
+// GetMethods returns the methods the route matches against.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An empty list will be returned if the route does not have methods.
+func (r *Route) GetMethods() ([]string, error) {
+ if r.err != nil {
+ return nil, r.err
+ }
+ for _, m := range r.matchers {
+ if methods, ok := m.(methodMatcher); ok {
+ return []string(methods), nil
+ }
+ }
+ return nil, nil
+}
+
// GetHostTemplate returns the template used to build the
// route match.
// This is useful for building simple REST API documentation and for instrumentation
@@ -599,11 +665,22 @@ func (r *Route) buildVars(m map[string]string) map[string]string {
// parentRoute allows routes to know about parent host and path definitions.
type parentRoute interface {
+ getBuildScheme() string
getNamedRoutes() map[string]*Route
getRegexpGroup() *routeRegexpGroup
buildVars(map[string]string) map[string]string
}
+func (r *Route) getBuildScheme() string {
+ if r.buildScheme != "" {
+ return r.buildScheme
+ }
+ if r.parent != nil {
+ return r.parent.getBuildScheme()
+ }
+ return ""
+}
+
// getNamedRoutes returns the map where named routes are registered.
func (r *Route) getNamedRoutes() map[string]*Route {
if r.parent == nil {
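A rough usage sketch of the mux additions above (scheme-aware URL building, `GetPathRegexp`, `GetMethods`); the host, path and variable names are illustrative only:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	route := r.Host("example.com").
		Path("/articles/{id:[0-9]+}").
		Schemes("https").
		Methods("GET", "HEAD").
		HandlerFunc(func(w http.ResponseWriter, r *http.Request) {})

	// GetPathRegexp exposes the expanded path regexp (new in this change).
	if re, err := route.GetPathRegexp(); err == nil {
		fmt.Println("path regexp:", re)
	}
	// GetMethods lists the methods the route matches against (new in this change).
	if methods, err := route.GetMethods(); err == nil {
		fmt.Println("methods:", methods)
	}
	// URL now honours the scheme registered via Schemes().
	if u, err := route.URL("id", "42"); err == nil {
		fmt.Println("url:", u.String()) // https://example.com/articles/42
	}
}
```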
diff --git a/vendor/github.com/gorilla/sessions/README.md b/vendor/github.com/gorilla/sessions/README.md
index ebc60d0..10eb7f0 100644
--- a/vendor/github.com/gorilla/sessions/README.md
+++ b/vendor/github.com/gorilla/sessions/README.md
@@ -44,14 +44,14 @@ Let's start with an example that shows the sessions API in a nutshell:
First we initialize a session store calling `NewCookieStore()` and passing a
secret key used to authenticate the session. Inside the handler, we call
-`store.Get()` to retrieve an existing session or a new one. Then we set some
-session values in session.Values, which is a `map[interface{}]interface{}`.
+`store.Get()` to retrieve an existing session or create a new one. Then we set
+some session values in session.Values, which is a `map[interface{}]interface{}`.
And finally we call `session.Save()` to save the session in the response.
Important Note: If you aren't using gorilla/mux, you need to wrap your handlers
with
[`context.ClearHandler`](http://www.gorillatoolkit.org/pkg/context#ClearHandler)
-as or else you will leak memory! An easy way to do this is to wrap the top-level
+or else you will leak memory! An easy way to do this is to wrap the top-level
mux when calling http.ListenAndServe:
```go
diff --git a/vendor/github.com/gorilla/sessions/doc.go b/vendor/github.com/gorilla/sessions/doc.go
index 668e05e..591d932 100644
--- a/vendor/github.com/gorilla/sessions/doc.go
+++ b/vendor/github.com/gorilla/sessions/doc.go
@@ -29,8 +29,7 @@ Let's start with an example that shows the sessions API in a nutshell:
var store = sessions.NewCookieStore([]byte("something-very-secret"))
func MyHandler(w http.ResponseWriter, r *http.Request) {
- // Get a session. We're ignoring the error resulted from decoding an
- // existing session: Get() always returns a session, even if empty.
+ // Get a session. Get() always returns a session, even if empty.
session, err := store.Get(r, "session-name")
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
diff --git a/vendor/github.com/hashicorp/go-multierror/multierror.go b/vendor/github.com/hashicorp/go-multierror/multierror.go
index 2ea0827..89b1422 100644
--- a/vendor/github.com/hashicorp/go-multierror/multierror.go
+++ b/vendor/github.com/hashicorp/go-multierror/multierror.go
@@ -40,11 +40,11 @@ func (e *Error) GoString() string {
}
// WrappedErrors returns the list of errors that this Error is wrapping.
-// It is an implementatin of the errwrap.Wrapper interface so that
+// It is an implementation of the errwrap.Wrapper interface so that
// multierror.Error can be used with that library.
//
// This method is not safe to be called concurrently and is no different
-// than accessing the Errors field directly. It is implementd only to
+// than accessing the Errors field directly. It is implemented only to
// satisfy the errwrap.Wrapper interface.
func (e *Error) WrappedErrors() []error {
return e.Errors
diff --git a/vendor/github.com/hashicorp/hcl/appveyor.yml b/vendor/github.com/hashicorp/hcl/appveyor.yml
index 3c8cdf8..4db0b71 100644
--- a/vendor/github.com/hashicorp/hcl/appveyor.yml
+++ b/vendor/github.com/hashicorp/hcl/appveyor.yml
@@ -4,7 +4,7 @@ clone_folder: c:\gopath\src\github.com\hashicorp\hcl
environment:
GOPATH: c:\gopath
init:
- - git config --global core.autocrlf true
+ - git config --global core.autocrlf false
install:
- cmd: >-
echo %Path%
diff --git a/vendor/github.com/hashicorp/hcl/decoder.go b/vendor/github.com/hashicorp/hcl/decoder.go
index 0b39c1b..bed9ebb 100644
--- a/vendor/github.com/hashicorp/hcl/decoder.go
+++ b/vendor/github.com/hashicorp/hcl/decoder.go
@@ -89,7 +89,7 @@ func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error
switch k.Kind() {
case reflect.Bool:
return d.decodeBool(name, node, result)
- case reflect.Float64:
+ case reflect.Float32, reflect.Float64:
return d.decodeFloat(name, node, result)
case reflect.Int, reflect.Int32, reflect.Int64:
return d.decodeInt(name, node, result)
@@ -137,13 +137,13 @@ func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) e
func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
switch n := node.(type) {
case *ast.LiteralType:
- if n.Token.Type == token.FLOAT {
+ if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
v, err := strconv.ParseFloat(n.Token.Text, 64)
if err != nil {
return err
}
- result.Set(reflect.ValueOf(v))
+ result.Set(reflect.ValueOf(v).Convert(result.Type()))
return nil
}
}
@@ -573,7 +573,11 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
// Compile the list of all the fields that we're going to be decoding
// from all the structs.
- fields := make(map[*reflect.StructField]reflect.Value)
+ type field struct {
+ field reflect.StructField
+ val reflect.Value
+ }
+ fields := []field{}
for len(structs) > 0 {
structVal := structs[0]
structs = structs[1:]
@@ -616,7 +620,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
}
// Normal struct field, store it away
- fields[&fieldType] = structVal.Field(i)
+ fields = append(fields, field{fieldType, structVal.Field(i)})
}
}
@@ -624,26 +628,27 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
decodedFields := make([]string, 0, len(fields))
decodedFieldsVal := make([]reflect.Value, 0)
unusedKeysVal := make([]reflect.Value, 0)
- for fieldType, field := range fields {
- if !field.IsValid() {
+ for _, f := range fields {
+ field, fieldValue := f.field, f.val
+ if !fieldValue.IsValid() {
// This should never happen
panic("field is not valid")
}
// If we can't set the field, then it is unexported or something,
// and we just continue onwards.
- if !field.CanSet() {
+ if !fieldValue.CanSet() {
continue
}
- fieldName := fieldType.Name
+ fieldName := field.Name
- tagValue := fieldType.Tag.Get(tagName)
+ tagValue := field.Tag.Get(tagName)
tagParts := strings.SplitN(tagValue, ",", 2)
if len(tagParts) >= 2 {
switch tagParts[1] {
case "decodedFields":
- decodedFieldsVal = append(decodedFieldsVal, field)
+ decodedFieldsVal = append(decodedFieldsVal, fieldValue)
continue
case "key":
if item == nil {
@@ -654,10 +659,10 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
}
}
- field.SetString(item.Keys[0].Token.Value().(string))
+ fieldValue.SetString(item.Keys[0].Token.Value().(string))
continue
case "unusedKeys":
- unusedKeysVal = append(unusedKeysVal, field)
+ unusedKeysVal = append(unusedKeysVal, fieldValue)
continue
}
}
@@ -684,7 +689,7 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
// because we actually want the value.
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
if len(prefixMatches.Items) > 0 {
- if err := d.decode(fieldName, prefixMatches, field); err != nil {
+ if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
return err
}
}
@@ -694,12 +699,12 @@ func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value)
decodeNode = &ast.ObjectList{Items: ot.List.Items}
}
- if err := d.decode(fieldName, decodeNode, field); err != nil {
+ if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
return err
}
}
- decodedFields = append(decodedFields, fieldType.Name)
+ decodedFields = append(decodedFields, field.Name)
}
if len(decodedFieldsVal) > 0 {
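A small sketch of what the decoder changes above enable: `float32` struct fields are now accepted, and integer (NUMBER) literals decode into float fields. The struct and input are made up for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

type Config struct {
	Ratio   float32 `hcl:"ratio"`
	Timeout float64 `hcl:"timeout"`
}

func main() {
	input := `
ratio   = 0.75
timeout = 30
`
	var cfg Config
	// "timeout = 30" is a NUMBER token, but it now decodes into a float field;
	// "ratio" lands in a float32 via the added Convert call.
	if err := hcl.Decode(&cfg, input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg)
}
```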
diff --git a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
index 6e54bed..098e1bc 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/parser/parser.go
@@ -3,6 +3,7 @@
package parser
import (
+ "bytes"
"errors"
"fmt"
"strings"
@@ -36,6 +37,11 @@ func newParser(src []byte) *Parser {
// Parse returns the fully parsed source and returns the abstract syntax tree.
func Parse(src []byte) (*ast.File, error) {
+ // normalize all line endings
+ // since the scanner and output only work with "\n" line endings, we may
+ // end up with dangling "\r" characters in the parsed data.
+ src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
+
p := newParser(src)
return p.Parse()
}
@@ -191,9 +197,12 @@ func (p *Parser) objectItem() (*ast.ObjectItem, error) {
keyStr = append(keyStr, k.Token.Text)
}
- return nil, fmt.Errorf(
- "key '%s' expected start of object ('{') or assignment ('=')",
- strings.Join(keyStr, " "))
+ return nil, &PosError{
+ Pos: p.tok.Pos,
+ Err: fmt.Errorf(
+ "key '%s' expected start of object ('{') or assignment ('=')",
+ strings.Join(keyStr, " ")),
+ }
}
// do a look-ahead for line comment
@@ -313,7 +322,10 @@ func (p *Parser) objectType() (*ast.ObjectType, error) {
// No error, scan and expect the ending to be a brace
if tok := p.scan(); tok.Type != token.RBRACE {
- return nil, fmt.Errorf("object expected closing RBRACE got: %s", tok.Type)
+ return nil, &PosError{
+ Pos: tok.Pos,
+ Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
+ }
}
o.List = l
diff --git a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
index 6966236..6601ef7 100644
--- a/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
+++ b/vendor/github.com/hashicorp/hcl/hcl/scanner/scanner.go
@@ -351,7 +351,7 @@ func (s *Scanner) scanNumber(ch rune) token.Type {
return token.NUMBER
}
-// scanMantissa scans the mantissa begining from the rune. It returns the next
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
 // non-decimal rune. It's used to determine whether it's a fraction or exponent.
func (s *Scanner) scanMantissa(ch rune) rune {
scanned := false
diff --git a/vendor/github.com/hashicorp/hcl/json/parser/parser.go b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
index 6f46085..125a5f0 100644
--- a/vendor/github.com/hashicorp/hcl/json/parser/parser.go
+++ b/vendor/github.com/hashicorp/hcl/json/parser/parser.go
@@ -147,7 +147,7 @@ func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
// Done
return keys, nil
case token.ILLEGAL:
- fmt.Println("illegal")
+ return nil, errors.New("illegal")
default:
return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
}
diff --git a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
index dd5c72b..fe3f0f0 100644
--- a/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
+++ b/vendor/github.com/hashicorp/hcl/json/scanner/scanner.go
@@ -246,7 +246,7 @@ func (s *Scanner) scanNumber(ch rune) token.Type {
return token.NUMBER
}
-// scanMantissa scans the mantissa begining from the rune. It returns the next
+// scanMantissa scans the mantissa beginning from the rune. It returns the next
 // non-decimal rune. It's used to determine whether it's a fraction or exponent.
func (s *Scanner) scanMantissa(ch rune) rune {
scanned := false
diff --git a/vendor/github.com/hashicorp/vault/api/auth_token.go b/vendor/github.com/hashicorp/vault/api/auth_token.go
index aff10f4..4f74f61 100644
--- a/vendor/github.com/hashicorp/vault/api/auth_token.go
+++ b/vendor/github.com/hashicorp/vault/api/auth_token.go
@@ -135,6 +135,26 @@ func (c *TokenAuth) RenewSelf(increment int) (*Secret, error) {
return ParseSecret(resp.Body)
}
+// RenewTokenAsSelf behaves like renew-self, but authenticates using a provided
+// token instead of the token attached to the client.
+func (c *TokenAuth) RenewTokenAsSelf(token string, increment int) (*Secret, error) {
+ r := c.c.NewRequest("PUT", "/v1/auth/token/renew-self")
+ r.ClientToken = token
+
+ body := map[string]interface{}{"increment": increment}
+ if err := r.SetJSONBody(body); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return ParseSecret(resp.Body)
+}
+
// RevokeAccessor revokes a token associated with the given accessor
// along with all the child tokens.
func (c *TokenAuth) RevokeAccessor(accessor string) error {
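A rough sketch of the new `RenewTokenAsSelf` helper, which renews a token other than the one attached to the client; the token value and increment are placeholders:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	otherToken := "replace-with-a-real-token" // placeholder
	// Renew otherToken for 3600 seconds without changing the client's own token.
	secret, err := client.Auth().Token().RenewTokenAsSelf(otherToken, 3600)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("renewed, lease duration: %ds", secret.Auth.LeaseDuration)
}
```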
diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go
index 5f8a6f6..0204cec 100644
--- a/vendor/github.com/hashicorp/vault/api/client.go
+++ b/vendor/github.com/hashicorp/vault/api/client.go
@@ -3,9 +3,11 @@ package api
import (
"crypto/tls"
"fmt"
+ "net"
"net/http"
"net/url"
"os"
+ "path"
"strconv"
"strings"
"sync"
@@ -15,6 +17,7 @@ import (
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-rootcerts"
+ "github.com/hashicorp/vault/helper/parseutil"
"github.com/sethgrid/pester"
)
@@ -23,6 +26,7 @@ const EnvVaultCACert = "VAULT_CACERT"
const EnvVaultCAPath = "VAULT_CAPATH"
const EnvVaultClientCert = "VAULT_CLIENT_CERT"
const EnvVaultClientKey = "VAULT_CLIENT_KEY"
+const EnvVaultClientTimeout = "VAULT_CLIENT_TIMEOUT"
const EnvVaultInsecure = "VAULT_SKIP_VERIFY"
const EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME"
const EnvVaultWrapTTL = "VAULT_WRAP_TTL"
@@ -53,6 +57,9 @@ type Config struct {
// MaxRetries controls the maximum number of times to retry when a 5xx error
// occurs. Set to 0 or less to disable retrying. Defaults to 0.
MaxRetries int
+
+	// Timeout is for setting a custom timeout parameter in the HttpClient
+ Timeout time.Duration
}
// TLSConfig contains the parameters needed to configure TLS on the HTTP client
@@ -155,6 +162,7 @@ func (c *Config) ReadEnvironment() error {
var envCAPath string
var envClientCert string
var envClientKey string
+ var envClientTimeout time.Duration
var envInsecure bool
var envTLSServerName string
var envMaxRetries *uint64
@@ -182,6 +190,13 @@ func (c *Config) ReadEnvironment() error {
if v := os.Getenv(EnvVaultClientKey); v != "" {
envClientKey = v
}
+ if t := os.Getenv(EnvVaultClientTimeout); t != "" {
+ clientTimeout, err := parseutil.ParseDurationSecond(t)
+ if err != nil {
+ return fmt.Errorf("Could not parse %s", EnvVaultClientTimeout)
+ }
+ envClientTimeout = clientTimeout
+ }
if v := os.Getenv(EnvVaultInsecure); v != "" {
var err error
envInsecure, err = strconv.ParseBool(v)
@@ -214,6 +229,10 @@ func (c *Config) ReadEnvironment() error {
c.MaxRetries = int(*envMaxRetries) + 1
}
+ if envClientTimeout != 0 {
+ c.Timeout = envClientTimeout
+ }
+
return nil
}
@@ -223,6 +242,7 @@ type Client struct {
addr *url.URL
config *Config
token string
+ headers http.Header
wrappingLookupFunc WrappingLookupFunc
}
@@ -247,10 +267,14 @@ func NewClient(c *Config) (*Client, error) {
if c.HttpClient == nil {
c.HttpClient = DefaultConfig().HttpClient
}
+ if c.HttpClient.Transport == nil {
+ c.HttpClient.Transport = cleanhttp.DefaultTransport()
+ }
- tp := c.HttpClient.Transport.(*http.Transport)
- if err := http2.ConfigureTransport(tp); err != nil {
- return nil, err
+ if tp, ok := c.HttpClient.Transport.(*http.Transport); ok {
+ if err := http2.ConfigureTransport(tp); err != nil {
+ return nil, err
+ }
}
redirFunc := func() {
@@ -303,6 +327,11 @@ func (c *Client) SetMaxRetries(retries int) {
c.config.MaxRetries = retries
}
+// SetClientTimeout sets the client request timeout
+func (c *Client) SetClientTimeout(timeout time.Duration) {
+ c.config.Timeout = timeout
+}
+
// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs
// for a given operation and path
func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) {
@@ -326,17 +355,38 @@ func (c *Client) ClearToken() {
c.token = ""
}
+// SetHeaders sets the headers to be used for future requests.
+func (c *Client) SetHeaders(headers http.Header) {
+ c.headers = headers
+}
+
+// Clone creates a copy of this client.
+func (c *Client) Clone() (*Client, error) {
+ return NewClient(c.config)
+}
+
// NewRequest creates a new raw request object to query the Vault server
// configured for this client. This is an advanced method and generally
// doesn't need to be called externally.
-func (c *Client) NewRequest(method, path string) *Request {
+func (c *Client) NewRequest(method, requestPath string) *Request {
+	// if SRV records exist (see https://tools.ietf.org/html/draft-andrews-http-srv-02), look up the
+	// SRV record and take the highest match; this is not designed for high-availability, just discovery
+ var host string = c.addr.Host
+ if c.addr.Port() == "" {
+ // Internet Draft specifies that the SRV record is ignored if a port is given
+ _, addrs, err := net.LookupSRV("http", "tcp", c.addr.Hostname())
+ if err == nil && len(addrs) > 0 {
+ host = fmt.Sprintf("%s:%d", addrs[0].Target, addrs[0].Port)
+ }
+ }
+
req := &Request{
Method: method,
URL: &url.URL{
User: c.addr.User,
Scheme: c.addr.Scheme,
- Host: c.addr.Host,
- Path: path,
+ Host: host,
+ Path: path.Join(c.addr.Path, requestPath),
},
ClientToken: c.token,
Params: make(map[string][]string),
@@ -344,18 +394,24 @@ func (c *Client) NewRequest(method, path string) *Request {
var lookupPath string
switch {
- case strings.HasPrefix(path, "/v1/"):
- lookupPath = strings.TrimPrefix(path, "/v1/")
- case strings.HasPrefix(path, "v1/"):
- lookupPath = strings.TrimPrefix(path, "v1/")
+ case strings.HasPrefix(requestPath, "/v1/"):
+ lookupPath = strings.TrimPrefix(requestPath, "/v1/")
+ case strings.HasPrefix(requestPath, "v1/"):
+ lookupPath = strings.TrimPrefix(requestPath, "v1/")
default:
- lookupPath = path
+ lookupPath = requestPath
}
if c.wrappingLookupFunc != nil {
req.WrapTTL = c.wrappingLookupFunc(method, lookupPath)
} else {
req.WrapTTL = DefaultWrappingLookupFunc(method, lookupPath)
}
+ if c.config.Timeout != 0 {
+ c.config.HttpClient.Timeout = c.config.Timeout
+ }
+ if c.headers != nil {
+ req.Headers = c.headers
+ }
return req
}
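A rough sketch tying together the client additions above (`VAULT_CLIENT_TIMEOUT`, `SetClientTimeout`, `SetHeaders`, `Clone`); the header name and value are made up:

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/hashicorp/vault/api"
)

func main() {
	config := api.DefaultConfig()
	// ReadEnvironment now also honours VAULT_CLIENT_TIMEOUT (e.g. "30" or "30s").
	if err := config.ReadEnvironment(); err != nil {
		log.Fatal(err)
	}

	client, err := api.NewClient(config)
	if err != nil {
		log.Fatal(err)
	}

	// Override the timeout programmatically; it is applied to the HTTP client
	// the next time NewRequest runs.
	client.SetClientTimeout(15 * time.Second)

	// Attach extra headers to every future request.
	headers := http.Header{}
	headers.Set("X-Custom-Header", "example") // hypothetical header
	client.SetHeaders(headers)

	// Clone builds a second client from the same configuration.
	other, err := client.Clone()
	if err != nil {
		log.Fatal(err)
	}
	_ = other
}
```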
diff --git a/vendor/github.com/hashicorp/vault/api/renewer.go b/vendor/github.com/hashicorp/vault/api/renewer.go
new file mode 100644
index 0000000..a2a4b66
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/renewer.go
@@ -0,0 +1,302 @@
+package api
+
+import (
+ "errors"
+ "math/rand"
+ "sync"
+ "time"
+)
+
+var (
+ ErrRenewerMissingInput = errors.New("missing input to renewer")
+ ErrRenewerMissingSecret = errors.New("missing secret to renew")
+ ErrRenewerNotRenewable = errors.New("secret is not renewable")
+ ErrRenewerNoSecretData = errors.New("returned empty secret data")
+
+ // DefaultRenewerGrace is the default grace period
+ DefaultRenewerGrace = 15 * time.Second
+
+ // DefaultRenewerRenewBuffer is the default size of the buffer for renew
+ // messages on the channel.
+ DefaultRenewerRenewBuffer = 5
+)
+
+// Renewer is a process for renewing a secret.
+//
+// renewer, err := client.NewRenewer(&RenewerInput{
+// Secret: mySecret,
+// })
+// go renewer.Renew()
+// defer renewer.Stop()
+//
+// for {
+// select {
+// case err := <-renewer.DoneCh():
+// if err != nil {
+// log.Fatal(err)
+// }
+//
+// // Renewal is now over
+// case renewal := <-renewer.RenewCh():
+// log.Printf("Successfully renewed: %#v", renewal)
+// }
+// }
+//
+//
+// The `DoneCh` will return if renewal fails or if the remaining lease duration
+// after a renewal is less than or equal to the grace period. In
+// both cases, the caller should attempt a re-read of the secret. Clients should
+// check the return value of the channel to see if renewal was successful.
+type Renewer struct {
+ l sync.Mutex
+
+ client *Client
+ secret *Secret
+ grace time.Duration
+ random *rand.Rand
+ doneCh chan error
+ renewCh chan *RenewOutput
+
+ stopped bool
+ stopCh chan struct{}
+}
+
+// RenewerInput is used as input to the renew function.
+type RenewerInput struct {
+ // Secret is the secret to renew
+ Secret *Secret
+
+	// Grace is the minimum remaining lease duration at which the renewer
+	// returns so the upstream client can do a re-read. This can be used to
+	// prevent clients from waiting too long to read a new credential and
+	// incurring downtime.
+ Grace time.Duration
+
+ // Rand is the randomizer to use for underlying randomization. If not
+ // provided, one will be generated and seeded automatically. If provided, it
+ // is assumed to have already been seeded.
+ Rand *rand.Rand
+
+ // RenewBuffer is the size of the buffered channel where renew messages are
+ // dispatched.
+ RenewBuffer int
+}
+
+// RenewOutput is the metadata returned to the client (if it's listening) with
+// renew messages.
+type RenewOutput struct {
+ // RenewedAt is the timestamp when the renewal took place (UTC).
+ RenewedAt time.Time
+
+ // Secret is the underlying renewal data. It's the same struct as all data
+ // that is returned from Vault, but since this is renewal data, it will not
+ // usually include the secret itself.
+ Secret *Secret
+}
+
+// NewRenewer creates a new renewer from the given input.
+func (c *Client) NewRenewer(i *RenewerInput) (*Renewer, error) {
+ if i == nil {
+ return nil, ErrRenewerMissingInput
+ }
+
+ secret := i.Secret
+ if secret == nil {
+ return nil, ErrRenewerMissingSecret
+ }
+
+ grace := i.Grace
+ if grace == 0 {
+ grace = DefaultRenewerGrace
+ }
+
+ random := i.Rand
+ if random == nil {
+ random = rand.New(rand.NewSource(int64(time.Now().Nanosecond())))
+ }
+
+ renewBuffer := i.RenewBuffer
+ if renewBuffer == 0 {
+ renewBuffer = DefaultRenewerRenewBuffer
+ }
+
+ return &Renewer{
+ client: c,
+ secret: secret,
+ grace: grace,
+ random: random,
+ doneCh: make(chan error, 1),
+ renewCh: make(chan *RenewOutput, renewBuffer),
+
+ stopped: false,
+ stopCh: make(chan struct{}),
+ }, nil
+}
+
+// DoneCh returns the channel where the renewer will publish when renewal stops.
+// If renewal stopped because of an error, that error is sent on the channel.
+func (r *Renewer) DoneCh() <-chan error {
+ return r.doneCh
+}
+
+// RenewCh is a channel that receives a message when a successful renewal takes
+// place and includes metadata about the renewal.
+func (r *Renewer) RenewCh() <-chan *RenewOutput {
+ return r.renewCh
+}
+
+// Stop stops the renewer.
+func (r *Renewer) Stop() {
+ r.l.Lock()
+ if !r.stopped {
+ close(r.stopCh)
+ r.stopped = true
+ }
+ r.l.Unlock()
+}
+
+// Renew starts a background process for renewing this secret. When the secret
+// has auth data, this attempts to renew the auth (token). When the secret
+// has a lease, this attempts to renew the lease.
+func (r *Renewer) Renew() {
+ var result error
+ if r.secret.Auth != nil {
+ result = r.renewAuth()
+ } else {
+ result = r.renewLease()
+ }
+
+ select {
+ case r.doneCh <- result:
+ case <-r.stopCh:
+ }
+}
+
+// renewAuth is a helper for renewing authentication.
+func (r *Renewer) renewAuth() error {
+ if !r.secret.Auth.Renewable || r.secret.Auth.ClientToken == "" {
+ return ErrRenewerNotRenewable
+ }
+
+ client, token := r.client, r.secret.Auth.ClientToken
+
+ for {
+ // Check if we are stopped.
+ select {
+ case <-r.stopCh:
+ return nil
+ default:
+ }
+
+ // Renew the auth.
+ renewal, err := client.Auth().Token().RenewTokenAsSelf(token, 0)
+ if err != nil {
+ return err
+ }
+
+ // Push a message that a renewal took place.
+ select {
+ case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}:
+ default:
+ }
+
+ // Somehow, sometimes, this happens.
+ if renewal == nil || renewal.Auth == nil {
+ return ErrRenewerNoSecretData
+ }
+
+ // Do nothing if we are not renewable
+ if !renewal.Auth.Renewable {
+ return ErrRenewerNotRenewable
+ }
+
+ // Grab the lease duration and sleep duration - note that we grab the auth
+ // lease duration, not the secret lease duration.
+ leaseDuration := time.Duration(renewal.Auth.LeaseDuration) * time.Second
+ sleepDuration := r.sleepDuration(leaseDuration)
+
+ // If we are within grace, return now.
+ if leaseDuration <= r.grace || sleepDuration <= r.grace {
+ return nil
+ }
+
+ select {
+ case <-r.stopCh:
+ return nil
+ case <-time.After(sleepDuration):
+ continue
+ }
+ }
+}
+
+// renewLease is a helper for renewing a lease.
+func (r *Renewer) renewLease() error {
+ if !r.secret.Renewable || r.secret.LeaseID == "" {
+ return ErrRenewerNotRenewable
+ }
+
+ client, leaseID := r.client, r.secret.LeaseID
+
+ for {
+ // Check if we are stopped.
+ select {
+ case <-r.stopCh:
+ return nil
+ default:
+ }
+
+ // Renew the lease.
+ renewal, err := client.Sys().Renew(leaseID, 0)
+ if err != nil {
+ return err
+ }
+
+ // Push a message that a renewal took place.
+ select {
+ case r.renewCh <- &RenewOutput{time.Now().UTC(), renewal}:
+ default:
+ }
+
+ // Somehow, sometimes, this happens.
+ if renewal == nil {
+ return ErrRenewerNoSecretData
+ }
+
+ // Do nothing if we are not renewable
+ if !renewal.Renewable {
+ return ErrRenewerNotRenewable
+ }
+
+ // Grab the lease duration and sleep duration
+ leaseDuration := time.Duration(renewal.LeaseDuration) * time.Second
+ sleepDuration := r.sleepDuration(leaseDuration)
+
+ // If we are within grace, return now.
+ if leaseDuration <= r.grace || sleepDuration <= r.grace {
+ return nil
+ }
+
+ select {
+ case <-r.stopCh:
+ return nil
+ case <-time.After(sleepDuration):
+ continue
+ }
+ }
+}
+
+// sleepDuration calculates the time to sleep given the base lease duration. The
+// base is reduced to 1/3 of the lease duration and then multiplied by a random
+// factor between 0.5 and 1.0. This extra randomness prevents multiple clients
+// from all trying to renew simultaneously.
+func (r *Renewer) sleepDuration(base time.Duration) time.Duration {
+ sleep := float64(base)
+
+ // Renew at 1/3 the remaining lease. This will give us an opportunity to retry
+ // at least one more time should the first renewal fail.
+ sleep = sleep / 3.0
+
+	// Add some randomness so that many clients do not hit Vault simultaneously.
+ sleep = sleep * (r.random.Float64() + 1) / 2.0
+
+ return time.Duration(sleep)
+}
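A fuller, runnable variant of the usage sketched in the `Renewer` doc comment above; the secret path is a hypothetical leased credential, and any renewable secret works:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Read a leased secret to keep renewed (hypothetical path).
	secret, err := client.Logical().Read("database/creds/readonly")
	if err != nil {
		log.Fatal(err)
	}

	renewer, err := client.NewRenewer(&api.RenewerInput{Secret: secret})
	if err != nil {
		log.Fatal(err)
	}
	go renewer.Renew()
	defer renewer.Stop()

	for {
		select {
		case err := <-renewer.DoneCh():
			// Renewal stopped: it failed or the lease entered the grace period.
			// The caller should re-read the secret and start a new renewer.
			if err != nil {
				log.Fatal(err)
			}
			return
		case renewal := <-renewer.RenewCh():
			log.Printf("renewed at %s", renewal.RenewedAt)
		}
	}
}
```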
diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go
index 685e2d7..83a28bd 100644
--- a/vendor/github.com/hashicorp/vault/api/request.go
+++ b/vendor/github.com/hashicorp/vault/api/request.go
@@ -14,6 +14,7 @@ type Request struct {
Method string
URL *url.URL
Params url.Values
+ Headers http.Header
ClientToken string
WrapTTL string
Obj interface{}
@@ -60,6 +61,14 @@ func (r *Request) ToHTTP() (*http.Request, error) {
req.URL.Host = r.URL.Host
req.Host = r.URL.Host
+ if r.Headers != nil {
+ for header, vals := range r.Headers {
+ for _, val := range vals {
+ req.Header.Add(header, val)
+ }
+ }
+ }
+
if len(r.ClientToken) != 0 {
req.Header.Set("X-Vault-Token", r.ClientToken)
}
diff --git a/vendor/github.com/hashicorp/vault/api/response.go b/vendor/github.com/hashicorp/vault/api/response.go
index 7c8ac9f..05502e1 100644
--- a/vendor/github.com/hashicorp/vault/api/response.go
+++ b/vendor/github.com/hashicorp/vault/api/response.go
@@ -25,8 +25,9 @@ func (r *Response) DecodeJSON(out interface{}) error {
// this will fully consume the response body, but will not close it. The
// body must still be closed manually.
func (r *Response) Error() error {
- // 200 to 399 are okay status codes
- if r.StatusCode >= 200 && r.StatusCode < 400 {
+	// 200 to 399 are okay status codes. 429 is the health status code returned
+	// by standby nodes.
+ if (r.StatusCode >= 200 && r.StatusCode < 400) || r.StatusCode == 429 {
return nil
}
diff --git a/vendor/github.com/hashicorp/vault/api/secret.go b/vendor/github.com/hashicorp/vault/api/secret.go
index 14924f9..7478a0c 100644
--- a/vendor/github.com/hashicorp/vault/api/secret.go
+++ b/vendor/github.com/hashicorp/vault/api/secret.go
@@ -42,6 +42,7 @@ type SecretWrapInfo struct {
Token string `json:"token"`
TTL int `json:"ttl"`
CreationTime time.Time `json:"creation_time"`
+ CreationPath string `json:"creation_path"`
WrappedAccessor string `json:"wrapped_accessor"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/ssh.go b/vendor/github.com/hashicorp/vault/api/ssh.go
index 7c3e56b..a17b0eb 100644
--- a/vendor/github.com/hashicorp/vault/api/ssh.go
+++ b/vendor/github.com/hashicorp/vault/api/ssh.go
@@ -36,3 +36,20 @@ func (c *SSH) Credential(role string, data map[string]interface{}) (*Secret, err
return ParseSecret(resp.Body)
}
+
+// SignKey signs the given public key and returns a signed public key to pass
+// along with the SSH request.
+func (c *SSH) SignKey(role string, data map[string]interface{}) (*Secret, error) {
+ r := c.c.NewRequest("PUT", fmt.Sprintf("/v1/%s/sign/%s", c.MountPoint, role))
+ if err := r.SetJSONBody(data); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ return ParseSecret(resp.Body)
+}
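A rough sketch of the new `SignKey` helper, asking an SSH CA mount to sign a public key; the mount point, role name, request field and response field follow the SSH secrets engine's sign endpoint and are placeholders here:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	ssh := client.SSHWithMountPoint("ssh-client-signer") // hypothetical mount
	secret, err := ssh.SignKey("my-role", map[string]interface{}{
		"public_key": "ssh-rsa AAAA... user@host", // placeholder public key
	})
	if err != nil {
		log.Fatal(err)
	}
	// "signed_key" is the field the SSH sign endpoint is expected to return.
	log.Println("signed key:", secret.Data["signed_key"])
}
```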
diff --git a/vendor/github.com/hashicorp/vault/api/sys_auth.go b/vendor/github.com/hashicorp/vault/api/sys_auth.go
index f9f3c8c..32f4bbd 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_auth.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_auth.go
@@ -82,19 +82,27 @@ func (c *Sys) DisableAuth(path string) error {
// documentation. Please refer to that documentation for more details.
type EnableAuthOptions struct {
- Type string `json:"type" structs:"type"`
- Description string `json:"description" structs:"description"`
- Local bool `json:"local" structs:"local"`
+ Type string `json:"type" structs:"type"`
+ Description string `json:"description" structs:"description"`
+ Config AuthConfigInput `json:"config" structs:"config"`
+ Local bool `json:"local" structs:"local"`
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty"`
+}
+
+type AuthConfigInput struct {
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
}
type AuthMount struct {
Type string `json:"type" structs:"type" mapstructure:"type"`
Description string `json:"description" structs:"description" mapstructure:"description"`
+ Accessor string `json:"accessor" structs:"accessor" mapstructure:"accessor"`
Config AuthConfigOutput `json:"config" structs:"config" mapstructure:"config"`
Local bool `json:"local" structs:"local" mapstructure:"local"`
}
type AuthConfigOutput struct {
- DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_config_cors.go b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go
new file mode 100644
index 0000000..e7f2a59
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_config_cors.go
@@ -0,0 +1,56 @@
+package api
+
+func (c *Sys) CORSStatus() (*CORSResponse, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/config/cors")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result CORSResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) ConfigureCORS(req *CORSRequest) (*CORSResponse, error) {
+ r := c.c.NewRequest("PUT", "/v1/sys/config/cors")
+ if err := r.SetJSONBody(req); err != nil {
+ return nil, err
+ }
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result CORSResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+func (c *Sys) DisableCORS() (*CORSResponse, error) {
+ r := c.c.NewRequest("DELETE", "/v1/sys/config/cors")
+
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result CORSResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+
+}
+
+type CORSRequest struct {
+ AllowedOrigins string `json:"allowed_origins"`
+ Enabled bool `json:"enabled"`
+}
+
+type CORSResponse struct {
+ AllowedOrigins string `json:"allowed_origins"`
+ Enabled bool `json:"enabled"`
+}
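A rough sketch of the new CORS helpers; the origin is illustrative:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	sys := client.Sys()

	// Enable CORS for a single origin.
	if _, err := sys.ConfigureCORS(&api.CORSRequest{
		AllowedOrigins: "https://ui.example.com",
		Enabled:        true,
	}); err != nil {
		log.Fatal(err)
	}

	// Read the current settings back.
	status, err := sys.CORSStatus()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("CORS enabled=%v origins=%s", status.Enabled, status.AllowedOrigins)
}
```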
diff --git a/vendor/github.com/hashicorp/vault/api/sys_health.go b/vendor/github.com/hashicorp/vault/api/sys_health.go
new file mode 100644
index 0000000..822354c
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/api/sys_health.go
@@ -0,0 +1,29 @@
+package api
+
+func (c *Sys) Health() (*HealthResponse, error) {
+ r := c.c.NewRequest("GET", "/v1/sys/health")
+	// If the code is 400 or above it will automatically turn into an error,
+	// but the sys/health API defaults to returning 5xx when the node is sealed
+	// or uninitialized, so we force those codes to something else so we can
+	// parse the response correctly
+ r.Params.Add("sealedcode", "299")
+ r.Params.Add("uninitcode", "299")
+ resp, err := c.c.RawRequest(r)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+
+ var result HealthResponse
+ err = resp.DecodeJSON(&result)
+ return &result, err
+}
+
+type HealthResponse struct {
+ Initialized bool `json:"initialized"`
+ Sealed bool `json:"sealed"`
+ Standby bool `json:"standby"`
+ ServerTimeUTC int64 `json:"server_time_utc"`
+ Version string `json:"version"`
+ ClusterName string `json:"cluster_name,omitempty"`
+ ClusterID string `json:"cluster_id,omitempty"`
+}
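A minimal sketch of the new `Health` helper:

```go
package main

import (
	"log"

	"github.com/hashicorp/vault/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	health, err := client.Sys().Health()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("initialized=%v sealed=%v standby=%v version=%s",
		health.Initialized, health.Sealed, health.Standby, health.Version)
}
```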
diff --git a/vendor/github.com/hashicorp/vault/api/sys_leader.go b/vendor/github.com/hashicorp/vault/api/sys_leader.go
index 201ac73..4951c46 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_leader.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_leader.go
@@ -14,7 +14,8 @@ func (c *Sys) Leader() (*LeaderResponse, error) {
}
type LeaderResponse struct {
- HAEnabled bool `json:"ha_enabled"`
- IsSelf bool `json:"is_self"`
- LeaderAddress string `json:"leader_address"`
+ HAEnabled bool `json:"ha_enabled"`
+ IsSelf bool `json:"is_self"`
+ LeaderAddress string `json:"leader_address"`
+ LeaderClusterAddress string `json:"leader_cluster_address"`
}
diff --git a/vendor/github.com/hashicorp/vault/api/sys_lease.go b/vendor/github.com/hashicorp/vault/api/sys_leases.go
index e5c19c4..34bd99e 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_lease.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_leases.go
@@ -1,7 +1,7 @@
package api
func (c *Sys) Renew(id string, increment int) (*Secret, error) {
- r := c.c.NewRequest("PUT", "/v1/sys/renew")
+ r := c.c.NewRequest("PUT", "/v1/sys/leases/renew")
body := map[string]interface{}{
"increment": increment,
@@ -21,7 +21,7 @@ func (c *Sys) Renew(id string, increment int) (*Secret, error) {
}
func (c *Sys) Revoke(id string) error {
- r := c.c.NewRequest("PUT", "/v1/sys/revoke/"+id)
+ r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke/"+id)
resp, err := c.c.RawRequest(r)
if err == nil {
defer resp.Body.Close()
@@ -30,7 +30,7 @@ func (c *Sys) Revoke(id string) error {
}
func (c *Sys) RevokePrefix(id string) error {
- r := c.c.NewRequest("PUT", "/v1/sys/revoke-prefix/"+id)
+ r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-prefix/"+id)
resp, err := c.c.RawRequest(r)
if err == nil {
defer resp.Body.Close()
@@ -39,7 +39,7 @@ func (c *Sys) RevokePrefix(id string) error {
}
func (c *Sys) RevokeForce(id string) error {
- r := c.c.NewRequest("PUT", "/v1/sys/revoke-force/"+id)
+ r := c.c.NewRequest("PUT", "/v1/sys/leases/revoke-force/"+id)
resp, err := c.c.RawRequest(r)
if err == nil {
defer resp.Body.Close()
diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go
index 907fddb..091a8f6 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_mounts.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go
@@ -124,23 +124,27 @@ type MountInput struct {
Description string `json:"description" structs:"description"`
Config MountConfigInput `json:"config" structs:"config"`
Local bool `json:"local" structs:"local"`
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name"`
}
type MountConfigInput struct {
DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
}
type MountOutput struct {
Type string `json:"type" structs:"type"`
Description string `json:"description" structs:"description"`
+ Accessor string `json:"accessor" structs:"accessor"`
Config MountConfigOutput `json:"config" structs:"config"`
Local bool `json:"local" structs:"local"`
}
type MountConfigOutput struct {
- DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
- ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
+ DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
+ PluginName string `json:"plugin_name,omitempty" structs:"plugin_name,omitempty" mapstructure:"plugin_name"`
}
diff --git a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go
index e485f2f..31a2dcd 100644
--- a/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go
+++ b/vendor/github.com/hashicorp/vault/helper/compressutil/compress.go
@@ -6,6 +6,8 @@ import (
"compress/lzw"
"fmt"
"io"
+
+ "github.com/golang/snappy"
)
const (
@@ -20,16 +22,35 @@ const (
// Byte value used as canary when using Lzw format
CompressionCanaryLzw byte = 'L'
+ // Byte value used as canary when using Snappy format
+ CompressionCanarySnappy byte = 'S'
+
CompressionTypeLzw = "lzw"
CompressionTypeGzip = "gzip"
+
+ CompressionTypeSnappy = "snappy"
)
+// SnappyReadCloser embeds the snappy reader which implements the io.Reader
+// interface. The decompress procedure in this utility expects an
+// io.ReadCloser. This type implements the io.Closer interface to retain the
+// generic way of decompression.
+type SnappyReadCloser struct {
+ *snappy.Reader
+}
+
+// Close is a noop method implemented only to satisfy the io.Closer interface
+func (s *SnappyReadCloser) Close() error {
+ return nil
+}
+
// CompressionConfig is used to select a compression type to be performed by
// Compress and Decompress utilities.
// Supported types are:
// * CompressionTypeLzw
// * CompressionTypeGzip
+// * CompressionTypeSnappy
//
// When using CompressionTypeGzip, the compression levels can also be chosen:
// * gzip.DefaultCompression
@@ -78,9 +99,13 @@ func Compress(data []byte, config *CompressionConfig) ([]byte, error) {
config.GzipCompressionLevel = gzip.DefaultCompression
}
writer, err = gzip.NewWriterLevel(&buf, config.GzipCompressionLevel)
+ case CompressionTypeSnappy:
+ buf.Write([]byte{CompressionCanarySnappy})
+ writer = snappy.NewBufferedWriter(&buf)
default:
return nil, fmt.Errorf("unsupported compression type")
}
+
if err != nil {
return nil, fmt.Errorf("failed to create a compression writer; err: %v", err)
}
@@ -117,22 +142,29 @@ func Decompress(data []byte) ([]byte, bool, error) {
}
switch {
+ // If the first byte matches the canary byte, remove the canary
+ // byte and try to decompress the data that is after the canary.
case data[0] == CompressionCanaryGzip:
- // If the first byte matches the canary byte, remove the canary
- // byte and try to decompress the data that is after the canary.
if len(data) < 2 {
return nil, false, fmt.Errorf("invalid 'data' after the canary")
}
data = data[1:]
reader, err = gzip.NewReader(bytes.NewReader(data))
case data[0] == CompressionCanaryLzw:
- // If the first byte matches the canary byte, remove the canary
- // byte and try to decompress the data that is after the canary.
if len(data) < 2 {
return nil, false, fmt.Errorf("invalid 'data' after the canary")
}
data = data[1:]
reader = lzw.NewReader(bytes.NewReader(data), lzw.LSB, 8)
+
+ case data[0] == CompressionCanarySnappy:
+ if len(data) < 2 {
+ return nil, false, fmt.Errorf("invalid 'data' after the canary")
+ }
+ data = data[1:]
+ reader = &SnappyReadCloser{
+ Reader: snappy.NewReader(bytes.NewReader(data)),
+ }
default:
// If the first byte doesn't match the canary byte, it means
// that the content was not compressed at all. Indicate the
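A small sketch round-tripping data through the new Snappy compression type, assuming `CompressionConfig.Type` selects the algorithm just as it does for the existing Lzw and Gzip types:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/helper/compressutil"
)

func main() {
	input := []byte(`{"key": "value"}`)

	compressed, err := compressutil.Compress(input, &compressutil.CompressionConfig{
		Type: compressutil.CompressionTypeSnappy,
	})
	if err != nil {
		log.Fatal(err)
	}

	// Decompress detects the Snappy canary byte and picks the matching reader;
	// the bool return reports whether the input was not compressed at all.
	output, notCompressed, err := compressutil.Decompress(compressed)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(notCompressed, string(output))
}
```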
diff --git a/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go
new file mode 100644
index 0000000..957d533
--- /dev/null
+++ b/vendor/github.com/hashicorp/vault/helper/parseutil/parseutil.go
@@ -0,0 +1,65 @@
+package parseutil
+
+import (
+ "encoding/json"
+ "errors"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/mitchellh/mapstructure"
+)
+
+func ParseDurationSecond(in interface{}) (time.Duration, error) {
+ var dur time.Duration
+ jsonIn, ok := in.(json.Number)
+ if ok {
+ in = jsonIn.String()
+ }
+ switch in.(type) {
+ case string:
+ inp := in.(string)
+ if inp == "" {
+ return time.Duration(0), nil
+ }
+ var err error
+		// Look for a suffix, otherwise it's a plain seconds value
+ if strings.HasSuffix(inp, "s") || strings.HasSuffix(inp, "m") || strings.HasSuffix(inp, "h") {
+ dur, err = time.ParseDuration(inp)
+ if err != nil {
+ return dur, err
+ }
+ } else {
+ // Plain integer
+ secs, err := strconv.ParseInt(inp, 10, 64)
+ if err != nil {
+ return dur, err
+ }
+ dur = time.Duration(secs) * time.Second
+ }
+ case int:
+ dur = time.Duration(in.(int)) * time.Second
+ case int32:
+ dur = time.Duration(in.(int32)) * time.Second
+ case int64:
+ dur = time.Duration(in.(int64)) * time.Second
+ case uint:
+ dur = time.Duration(in.(uint)) * time.Second
+ case uint32:
+ dur = time.Duration(in.(uint32)) * time.Second
+ case uint64:
+ dur = time.Duration(in.(uint64)) * time.Second
+ default:
+ return 0, errors.New("could not parse duration from input")
+ }
+
+ return dur, nil
+}
+
+func ParseBool(in interface{}) (bool, error) {
+ var result bool
+ if err := mapstructure.WeakDecode(in, &result); err != nil {
+ return false, err
+ }
+ return result, nil
+}
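A minimal sketch of the new parseutil helpers used by the client timeout handling earlier in this change:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/vault/helper/parseutil"
)

func main() {
	// Plain integers are treated as seconds; suffixed values go through
	// time.ParseDuration.
	for _, in := range []interface{}{"30", "1h", 90} {
		d, err := parseutil.ParseDurationSecond(in)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(in, "->", d)
	}

	b, err := parseutil.ParseBool("true")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(b)
}
```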
diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md
index e3956e8..5c1bb3c 100644
--- a/vendor/github.com/jmoiron/sqlx/README.md
+++ b/vendor/github.com/jmoiron/sqlx/README.md
@@ -1,4 +1,4 @@
-#sqlx
+# sqlx
[![Build Status](https://drone.io/github.com/jmoiron/sqlx/status.png)](https://drone.io/github.com/jmoiron/sqlx/latest) [![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go
index 0173056..0b17145 100644
--- a/vendor/github.com/jmoiron/sqlx/sqlx_context.go
+++ b/vendor/github.com/jmoiron/sqlx/sqlx_context.go
@@ -273,6 +273,12 @@ func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interf
return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
}
+// NamedExecContext using this Tx.
+// Any named placeholder parameters are replaced with fields from arg.
+func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
+ return NamedExecContext(ctx, tx, query, arg)
+}
+
// SelectContext using the prepared statement.
// Any placeholder parameters are replaced with supplied args.
func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error {
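A rough sketch of the new `Tx.NamedExecContext`; the driver, schema and struct are made up for illustration:

```go
package main

import (
	"context"
	"log"

	"github.com/jmoiron/sqlx"
	_ "github.com/mattn/go-sqlite3"
)

type Person struct {
	Name  string `db:"name"`
	Email string `db:"email"`
}

func main() {
	db, err := sqlx.Connect("sqlite3", ":memory:")
	if err != nil {
		log.Fatal(err)
	}
	db.MustExec(`CREATE TABLE person (name TEXT, email TEXT)`)

	ctx := context.Background()
	tx, err := db.Beginx()
	if err != nil {
		log.Fatal(err)
	}
	// Named placeholders are bound from the struct fields via their db tags.
	if _, err := tx.NamedExecContext(ctx,
		`INSERT INTO person (name, email) VALUES (:name, :email)`,
		&Person{Name: "Jane", Email: "jane@example.com"}); err != nil {
		tx.Rollback()
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
}
```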
diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md
index 1d86d0c..ebd1bcd 100644
--- a/vendor/github.com/magiconair/properties/CHANGELOG.md
+++ b/vendor/github.com/magiconair/properties/CHANGELOG.md
@@ -2,6 +2,11 @@
### Unreleased
+ * [PR #24](https://github.com/magiconair/properties/pull/24): Update keys when DisableExpansion is enabled
+ Thanks to @mgurov for the fix.
+
+### [1.7.3](https://github.com/magiconair/properties/tags/v1.7.3) - 10 Jul 2017
+
* [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically
* [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map
diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md
index 71b6a53..eb3b8c4 100644
--- a/vendor/github.com/magiconair/properties/README.md
+++ b/vendor/github.com/magiconair/properties/README.md
@@ -1,7 +1,7 @@
Overview [![Build Status](https://travis-ci.org/magiconair/properties.svg?branch=master)](https://travis-ci.org/magiconair/properties)
========
-#### Current version: 1.7.2
+#### Current version: 1.7.3
properties is a Go library for reading and writing properties files.
diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go
index 4f3d5a4..85bb186 100644
--- a/vendor/github.com/magiconair/properties/properties.go
+++ b/vendor/github.com/magiconair/properties/properties.go
@@ -511,6 +511,9 @@ func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
if p.DisableExpansion {
prev, ok = p.Get(key)
p.m[key] = value
+ if !ok {
+ p.k = append(p.k, key)
+ }
return prev, ok, nil
}
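A minimal sketch of the fix above: with `DisableExpansion` enabled, `Set` now records brand-new keys so they show up in `Keys()` (and in any later write-out):

```go
package main

import (
	"fmt"

	"github.com/magiconair/properties"
)

func main() {
	p := properties.NewProperties()
	p.DisableExpansion = true

	// The value is stored verbatim; ${name} is not expanded.
	if _, _, err := p.Set("greeting", "hello ${name}"); err != nil {
		panic(err)
	}
	fmt.Println(p.Keys()) // [greeting]
}
```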
diff --git a/vendor/github.com/mattn/go-sqlite3/README.md b/vendor/github.com/mattn/go-sqlite3/README.md
index 8369013..ad00f10 100644
--- a/vendor/github.com/mattn/go-sqlite3/README.md
+++ b/vendor/github.com/mattn/go-sqlite3/README.md
@@ -17,11 +17,11 @@ Installation
This package can be installed with the go get command:
go get github.com/mattn/go-sqlite3
-
+
_go-sqlite3_ is *cgo* package.
If you want to build your app using go-sqlite3, you need gcc.
However, if you install _go-sqlite3_ with `go install github.com/mattn/go-sqlite3`, you don't need gcc to build your app anymore.
-
+
Documentation
-------------
@@ -50,7 +50,7 @@ FAQ
* Can't build go-sqlite3 on windows 64bit.
- > Probably, you are using go 1.0, go1.0 has a problem when it comes to compiling/linking on windows 64bit.
+ > Probably, you are using go 1.0, go1.0 has a problem when it comes to compiling/linking on windows 64bit.
> See: [#27](https://github.com/mattn/go-sqlite3/issues/27)
* Getting insert error while query is opened.
@@ -65,13 +65,13 @@ FAQ
* Want to get time.Time with current locale
- Use `loc=auto` in SQLite3 filename schema like `file:foo.db?loc=auto`.
+ Use `_loc=auto` in SQLite3 filename schema like `file:foo.db?_loc=auto`.
-* Can use this in multiple routines concurrently?
+* Can I use this in multiple routines concurrently?
Yes for readonly. But, No for writable. See [#50](https://github.com/mattn/go-sqlite3/issues/50), [#51](https://github.com/mattn/go-sqlite3/issues/51), [#209](https://github.com/mattn/go-sqlite3/issues/209).
-* Why is it racy if I use a `sql.Open("sqlite", ":memory:")` database?
+* Why is it racy if I use a `sql.Open("sqlite3", ":memory:")` database?
Each connection to :memory: opens a brand new in-memory sql database, so if
the stdlib's sql engine happens to open another connection and you've only
diff --git a/vendor/github.com/mattn/go-sqlite3/callback.go b/vendor/github.com/mattn/go-sqlite3/callback.go
index 48fc63a..29ece3d 100644
--- a/vendor/github.com/mattn/go-sqlite3/callback.go
+++ b/vendor/github.com/mattn/go-sqlite3/callback.go
@@ -53,6 +53,30 @@ func doneTrampoline(ctx *C.sqlite3_context) {
ai.Done(ctx)
}
+//export compareTrampoline
+func compareTrampoline(handlePtr uintptr, la C.int, a *C.char, lb C.int, b *C.char) C.int {
+ cmp := lookupHandle(handlePtr).(func(string, string) int)
+ return C.int(cmp(C.GoStringN(a, la), C.GoStringN(b, lb)))
+}
+
+//export commitHookTrampoline
+func commitHookTrampoline(handle uintptr) int {
+ callback := lookupHandle(handle).(func() int)
+ return callback()
+}
+
+//export rollbackHookTrampoline
+func rollbackHookTrampoline(handle uintptr) {
+ callback := lookupHandle(handle).(func())
+ callback()
+}
+
+//export updateHookTrampoline
+func updateHookTrampoline(handle uintptr, op int, db *C.char, table *C.char, rowid int64) {
+ callback := lookupHandle(handle).(func(int, string, string, int64))
+ callback(op, C.GoString(db), C.GoString(table), rowid)
+}
+
// Use handles to avoid passing Go pointers to C.
type handleVal struct {
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go
index 33b9b9c..1ff58c3 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go
@@ -7,7 +7,7 @@ package sqlite3
/*
#cgo CFLAGS: -std=gnu99
-#cgo CFLAGS: -DSQLITE_ENABLE_RTREE -DSQLITE_THREADSAFE
+#cgo CFLAGS: -DSQLITE_ENABLE_RTREE -DSQLITE_THREADSAFE=1
#cgo CFLAGS: -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_FTS3_PARENTHESIS -DSQLITE_ENABLE_FTS4_UNICODE61
#cgo CFLAGS: -DSQLITE_TRACE_SIZE_LIMIT=15
#cgo CFLAGS: -DSQLITE_DISABLE_INTRINSIC
@@ -100,6 +100,11 @@ int _sqlite3_create_function(
}
void callbackTrampoline(sqlite3_context*, int, sqlite3_value**);
+
+int compareTrampoline(void*, int, char*, int, char*);
+int commitHookTrampoline(void*);
+void rollbackHookTrampoline(void*);
+void updateHookTrampoline(void*, int, char*, char*, sqlite3_int64);
*/
import "C"
import (
@@ -113,6 +118,7 @@ import (
"runtime"
"strconv"
"strings"
+ "sync"
"time"
"unsafe"
@@ -149,6 +155,12 @@ func Version() (libVersion string, libVersionNumber int, sourceID string) {
return libVersion, libVersionNumber, sourceID
}
+const (
+ SQLITE_DELETE = C.SQLITE_DELETE
+ SQLITE_INSERT = C.SQLITE_INSERT
+ SQLITE_UPDATE = C.SQLITE_UPDATE
+)
+
// SQLiteDriver implement sql.Driver.
type SQLiteDriver struct {
Extensions []string
@@ -157,6 +169,7 @@ type SQLiteDriver struct {
// SQLiteConn implement sql.Conn.
type SQLiteConn struct {
+ mu sync.Mutex
db *C.sqlite3
loc *time.Location
txlock string
@@ -171,6 +184,7 @@ type SQLiteTx struct {
// SQLiteStmt implement sql.Stmt.
type SQLiteStmt struct {
+ mu sync.Mutex
c *SQLiteConn
s *C.sqlite3_stmt
t string
@@ -191,6 +205,7 @@ type SQLiteRows struct {
cols []string
decltype []string
cls bool
+ closed bool
done chan struct{}
}
@@ -313,6 +328,74 @@ func (tx *SQLiteTx) Rollback() error {
return err
}
+// RegisterCollation makes a Go function available as a collation.
+//
+// cmp receives two UTF-8 strings, a and b. The result should be 0 if
+// a==b, -1 if a < b, and +1 if a > b.
+//
+// cmp must always return the same result given the same
+// inputs. Additionally, it must have the following properties for all
+// strings A, B and C: if A==B then B==A; if A==B and B==C then A==C;
+// if A<B then B>A; if A<B and B<C then A<C.
+//
+// If cmp does not obey these constraints, sqlite3's behavior is
+// undefined when the collation is used.
+func (c *SQLiteConn) RegisterCollation(name string, cmp func(string, string) int) error {
+ handle := newHandle(c, cmp)
+ cname := C.CString(name)
+ defer C.free(unsafe.Pointer(cname))
+ rv := C.sqlite3_create_collation(c.db, cname, C.SQLITE_UTF8, unsafe.Pointer(handle), (*[0]byte)(unsafe.Pointer(C.compareTrampoline)))
+ if rv != C.SQLITE_OK {
+ return c.lastError()
+ }
+ return nil
+}
+
+// RegisterCommitHook sets the commit hook for a connection.
+//
+// If the callback returns non-zero the transaction will become a rollback.
+//
+// If there is an existing commit hook for this connection, it will be
+// removed. If callback is nil the existing hook (if any) will be removed
+// without creating a new one.
+func (c *SQLiteConn) RegisterCommitHook(callback func() int) {
+ if callback == nil {
+ C.sqlite3_commit_hook(c.db, nil, nil)
+ } else {
+ C.sqlite3_commit_hook(c.db, (*[0]byte)(unsafe.Pointer(C.commitHookTrampoline)), unsafe.Pointer(newHandle(c, callback)))
+ }
+}
+
+// RegisterRollbackHook sets the rollback hook for a connection.
+//
+// If there is an existing rollback hook for this connection, it will be
+// removed. If callback is nil the existing hook (if any) will be removed
+// without creating a new one.
+func (c *SQLiteConn) RegisterRollbackHook(callback func()) {
+ if callback == nil {
+ C.sqlite3_rollback_hook(c.db, nil, nil)
+ } else {
+ C.sqlite3_rollback_hook(c.db, (*[0]byte)(unsafe.Pointer(C.rollbackHookTrampoline)), unsafe.Pointer(newHandle(c, callback)))
+ }
+}
+
+// RegisterUpdateHook sets the update hook for a connection.
+//
+// The parameters to the callback are the operation (one of the constants
+// SQLITE_INSERT, SQLITE_DELETE, or SQLITE_UPDATE), the database name, the
+// table name, and the rowid.
+//
+// If there is an existing update hook for this connection, it will be
+// removed. If callback is nil the existing hook (if any) will be removed
+// without creating a new one.
+func (c *SQLiteConn) RegisterUpdateHook(callback func(int, string, string, int64)) {
+ if callback == nil {
+ C.sqlite3_update_hook(c.db, nil, nil)
+ } else {
+ C.sqlite3_update_hook(c.db, (*[0]byte)(unsafe.Pointer(C.updateHookTrampoline)), unsafe.Pointer(newHandle(c, callback)))
+ }
+}
+
// RegisterFunc makes a Go function available as a SQLite function.
//
// The Go function can have arguments of the following types: any
@@ -543,6 +626,8 @@ func errorString(err Error) string {
// "deferred", "exclusive".
// _foreign_keys=X
// Enable or disable enforcement of foreign keys. X can be 1 or 0.
+// _recursive_triggers=X
+// Enable or disable recursive triggers. X can be 1 or 0.
func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
if C.sqlite3_threadsafe() == 0 {
return nil, errors.New("sqlite library was not compiled for thread-safe operation")
@@ -552,6 +637,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
txlock := "BEGIN"
busyTimeout := 5000
foreignKeys := -1
+ recursiveTriggers := -1
pos := strings.IndexRune(dsn, '?')
if pos >= 1 {
params, err := url.ParseQuery(dsn[pos+1:])
@@ -606,6 +692,18 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
}
}
+ // _recursive_triggers
+ if val := params.Get("_recursive_triggers"); val != "" {
+ switch val {
+ case "1":
+ recursiveTriggers = 1
+ case "0":
+ recursiveTriggers = 0
+ default:
+ return nil, fmt.Errorf("Invalid _recursive_triggers: %v", val)
+ }
+ }
+
if !strings.HasPrefix(dsn, "file:") {
dsn = dsn[:pos]
}
@@ -652,6 +750,17 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
return nil, err
}
}
+ if recursiveTriggers == 0 {
+ if err := exec("PRAGMA recursive_triggers = OFF;"); err != nil {
+ C.sqlite3_close_v2(db)
+ return nil, err
+ }
+ } else if recursiveTriggers == 1 {
+ if err := exec("PRAGMA recursive_triggers = ON;"); err != nil {
+ C.sqlite3_close_v2(db)
+ return nil, err
+ }
+ }
conn := &SQLiteConn{db: db, loc: loc, txlock: txlock}
@@ -679,11 +788,22 @@ func (c *SQLiteConn) Close() error {
return c.lastError()
}
deleteHandles(c)
+ c.mu.Lock()
c.db = nil
+ c.mu.Unlock()
runtime.SetFinalizer(c, nil)
return nil
}
+func (c *SQLiteConn) dbConnOpen() bool {
+ if c == nil {
+ return false
+ }
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.db != nil
+}
+
// Prepare the query string. Return a new statement.
func (c *SQLiteConn) Prepare(query string) (driver.Stmt, error) {
return c.prepare(context.Background(), query)
@@ -709,14 +829,17 @@ func (c *SQLiteConn) prepare(ctx context.Context, query string) (driver.Stmt, er
// Close the statement.
func (s *SQLiteStmt) Close() error {
+ s.mu.Lock()
+ defer s.mu.Unlock()
if s.closed {
return nil
}
s.closed = true
- if s.c == nil || s.c.db == nil {
+ if !s.c.dbConnOpen() {
return errors.New("sqlite statement with already closed database connection")
}
rv := C.sqlite3_finalize(s.s)
+ s.s = nil
if rv != C.SQLITE_OK {
return s.c.lastError()
}
@@ -734,6 +857,8 @@ type bindArg struct {
v driver.Value
}
+var placeHolder = []byte{0}
+
func (s *SQLiteStmt) bind(args []namedValue) error {
rv := C.sqlite3_reset(s.s)
if rv != C.SQLITE_ROW && rv != C.SQLITE_OK && rv != C.SQLITE_DONE {
@@ -755,8 +880,7 @@ func (s *SQLiteStmt) bind(args []namedValue) error {
rv = C.sqlite3_bind_null(s.s, n)
case string:
if len(v) == 0 {
- b := []byte{0}
- rv = C._sqlite3_bind_text(s.s, n, (*C.char)(unsafe.Pointer(&b[0])), C.int(0))
+ rv = C._sqlite3_bind_text(s.s, n, (*C.char)(unsafe.Pointer(&placeHolder[0])), C.int(0))
} else {
b := []byte(v)
rv = C._sqlite3_bind_text(s.s, n, (*C.char)(unsafe.Pointer(&b[0])), C.int(len(b)))
@@ -772,11 +896,11 @@ func (s *SQLiteStmt) bind(args []namedValue) error {
case float64:
rv = C.sqlite3_bind_double(s.s, n, C.double(v))
case []byte:
- if len(v) == 0 {
- rv = C._sqlite3_bind_blob(s.s, n, nil, 0)
- } else {
- rv = C._sqlite3_bind_blob(s.s, n, unsafe.Pointer(&v[0]), C.int(len(v)))
+ ln := len(v)
+ if ln == 0 {
+ v = placeHolder
}
+ rv = C._sqlite3_bind_blob(s.s, n, unsafe.Pointer(&v[0]), C.int(ln))
case time.Time:
b := []byte(v.Format(SQLiteTimestampFormats[0]))
rv = C._sqlite3_bind_text(s.s, n, (*C.char)(unsafe.Pointer(&b[0])), C.int(len(b)))
@@ -811,6 +935,7 @@ func (s *SQLiteStmt) query(ctx context.Context, args []namedValue) (driver.Rows,
cols: nil,
decltype: nil,
cls: s.cls,
+ closed: false,
done: make(chan struct{}),
}
@@ -883,25 +1008,33 @@ func (s *SQLiteStmt) exec(ctx context.Context, args []namedValue) (driver.Result
// Close the rows.
func (rc *SQLiteRows) Close() error {
- if rc.s.closed {
+ rc.s.mu.Lock()
+ if rc.s.closed || rc.closed {
+ rc.s.mu.Unlock()
return nil
}
+ rc.closed = true
if rc.done != nil {
close(rc.done)
}
if rc.cls {
+ rc.s.mu.Unlock()
return rc.s.Close()
}
rv := C.sqlite3_reset(rc.s.s)
if rv != C.SQLITE_OK {
+ rc.s.mu.Unlock()
return rc.s.c.lastError()
}
+ rc.s.mu.Unlock()
return nil
}
// Columns return column names.
func (rc *SQLiteRows) Columns() []string {
- if rc.nc != len(rc.cols) {
+ rc.s.mu.Lock()
+ defer rc.s.mu.Unlock()
+ if rc.s.s != nil && rc.nc != len(rc.cols) {
rc.cols = make([]string, rc.nc)
for i := 0; i < rc.nc; i++ {
rc.cols[i] = C.GoString(C.sqlite3_column_name(rc.s.s, C.int(i)))
@@ -910,9 +1043,8 @@ func (rc *SQLiteRows) Columns() []string {
return rc.cols
}
-// DeclTypes return column types.
-func (rc *SQLiteRows) DeclTypes() []string {
- if rc.decltype == nil {
+func (rc *SQLiteRows) declTypes() []string {
+ if rc.s.s != nil && rc.decltype == nil {
rc.decltype = make([]string, rc.nc)
for i := 0; i < rc.nc; i++ {
rc.decltype[i] = strings.ToLower(C.GoString(C.sqlite3_column_decltype(rc.s.s, C.int(i))))
@@ -921,8 +1053,20 @@ func (rc *SQLiteRows) DeclTypes() []string {
return rc.decltype
}
+// DeclTypes return column types.
+func (rc *SQLiteRows) DeclTypes() []string {
+ rc.s.mu.Lock()
+ defer rc.s.mu.Unlock()
+ return rc.declTypes()
+}
+
// Next move cursor to next.
func (rc *SQLiteRows) Next(dest []driver.Value) error {
+ if rc.s.closed {
+ return io.EOF
+ }
+ rc.s.mu.Lock()
+ defer rc.s.mu.Unlock()
rv := C.sqlite3_step(rc.s.s)
if rv == C.SQLITE_DONE {
return io.EOF
@@ -935,7 +1079,7 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error {
return nil
}
- rc.DeclTypes()
+ rc.declTypes()
for i := range dest {
switch C.sqlite3_column_type(rc.s.s, C.int(i)) {
@@ -948,10 +1092,11 @@ func (rc *SQLiteRows) Next(dest []driver.Value) error {
// large to be a reasonable timestamp in seconds.
if val > 1e12 || val < -1e12 {
val *= int64(time.Millisecond) // convert ms to nsec
+ t = time.Unix(0, val)
} else {
- val *= int64(time.Second) // convert sec to nsec
+ t = time.Unix(val, 0)
}
- t = time.Unix(0, val).UTC()
+ t = t.UTC()
if rc.s.c.loc != nil {
t = t.In(rc.s.c.loc)
}
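The hunk above also changes how integer timestamp columns are decoded: magnitudes above 1e12 are treated as milliseconds (1e12 seconds would land around the year 33,700, so it cannot be a sane epoch in seconds), everything else as seconds, and the value is converted with time.Unix directly rather than first scaling to nanoseconds. Here is a standalone restatement of that heuristic, with a made-up helper name, for reference:

```go
package main

import (
	"fmt"
	"time"
)

// intToTime mirrors the driver's heuristic for integer columns declared as
// timestamps: |val| > 1e12 is treated as milliseconds since the epoch,
// everything else as seconds.
func intToTime(val int64) time.Time {
	var t time.Time
	if val > 1e12 || val < -1e12 {
		t = time.Unix(0, val*int64(time.Millisecond))
	} else {
		t = time.Unix(val, 0)
	}
	return t.UTC()
}

func main() {
	fmt.Println(intToTime(1500000000))    // seconds: 2017-07-14 02:40:00 +0000 UTC
	fmt.Println(intToTime(1500000000000)) // milliseconds: same instant
}
```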
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go
index 135863e..e4557e6 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go
@@ -10,5 +10,6 @@ package sqlite3
#cgo CFLAGS: -DUSE_LIBSQLITE3
#cgo linux LDFLAGS: -lsqlite3
#cgo darwin LDFLAGS: -L/usr/local/opt/sqlite/lib -lsqlite3
+#cgo solaris LDFLAGS: -lsqlite3
*/
import "C"
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go
index a20d02c..f721b5e 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_other.go
@@ -9,5 +9,6 @@ package sqlite3
/*
#cgo CFLAGS: -I.
#cgo linux LDFLAGS: -ldl
+#cgo solaris LDFLAGS: -lc
*/
import "C"
diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
index 115ae67..afcfd5e 100644
--- a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
+++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go
@@ -38,12 +38,6 @@ func DecodeHookExec(
raw DecodeHookFunc,
from reflect.Type, to reflect.Type,
data interface{}) (interface{}, error) {
- // Build our arguments that reflect expects
- argVals := make([]reflect.Value, 3)
- argVals[0] = reflect.ValueOf(from)
- argVals[1] = reflect.ValueOf(to)
- argVals[2] = reflect.ValueOf(data)
-
switch f := typedDecodeHook(raw).(type) {
case DecodeHookFuncType:
return f(from, to, data)
@@ -121,6 +115,11 @@ func StringToTimeDurationHookFunc() DecodeHookFunc {
}
}
+// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
+// the decoder.
+//
+// Note that this is significantly different from the WeaklyTypedInput option
+// of the DecoderConfig.
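A hedged sketch of how WeaklyTypedHook is typically plugged into a DecoderConfig; the Settings struct and input map are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

type Settings struct {
	Name    string
	Retries string // string target: the hook stringifies the int below
}

func main() {
	input := map[string]interface{}{
		"Name":    "demo",
		"Retries": 3, // int in the input, string in the struct
	}

	var out Settings
	cfg := &mapstructure.DecoderConfig{
		DecodeHook: mapstructure.WeaklyTypedHook, // convert across kinds per the hook's rules
		Result:     &out,
	}
	dec, err := mapstructure.NewDecoder(cfg)
	if err != nil {
		log.Fatal(err)
	}
	if err := dec.Decode(input); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out) // {Name:demo Retries:3}
}
```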
func WeaklyTypedHook(
f reflect.Kind,
t reflect.Kind,
@@ -132,9 +131,8 @@ func WeaklyTypedHook(
case reflect.Bool:
if dataVal.Bool() {
return "1", nil
- } else {
- return "0", nil
}
+ return "0", nil
case reflect.Float32:
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
case reflect.Int:
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
index 6dee0ef..30a9957 100644
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -1,5 +1,5 @@
-// The mapstructure package exposes functionality to convert an
-// arbitrary map[string]interface{} into a native Go structure.
+// Package mapstructure exposes functionality to convert an arbitrary
+// map[string]interface{} into a native Go structure.
//
// The Go structure can be arbitrarily complex, containing slices,
// other structs, etc. and the decoder will properly decode nested
@@ -32,7 +32,12 @@ import (
// both.
type DecodeHookFunc interface{}
+// DecodeHookFuncType is a DecodeHookFunc which has complete information about
+// the source and target types.
type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error)
+
+// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the
+// source and target types.
type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error)
// DecoderConfig is the configuration that is used to create a new decoder
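For comparison, a user-defined DecodeHookFuncType might look like the following; the string-to-net.IP conversion and the helper name are purely illustrative and are not part of mapstructure:

```go
package main

import (
	"fmt"
	"net"
	"reflect"
)

// stringToIPHook is a user-written DecodeHookFuncType: it receives the full
// source and target types and converts plain string values into net.IP. Any
// other type pair is passed through untouched. A hook like this can be
// assigned to DecoderConfig.DecodeHook just like the built-in hooks.
func stringToIPHook(from reflect.Type, to reflect.Type, data interface{}) (interface{}, error) {
	if from != reflect.TypeOf("") || to != reflect.TypeOf(net.IP{}) {
		return data, nil
	}
	ip := net.ParseIP(data.(string))
	if ip == nil {
		return nil, fmt.Errorf("invalid IP address: %q", data)
	}
	return ip, nil
}

func main() {
	out, err := stringToIPHook(reflect.TypeOf(""), reflect.TypeOf(net.IP{}), "10.0.0.1")
	fmt.Println(out, err) // 10.0.0.1 <nil>
}
```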
@@ -436,7 +441,7 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value)
case dataKind == reflect.Uint:
val.SetFloat(float64(dataVal.Uint()))
case dataKind == reflect.Float32:
- val.SetFloat(float64(dataVal.Float()))
+ val.SetFloat(dataVal.Float())
case dataKind == reflect.Bool && d.config.WeaklyTypedInput:
if dataVal.Bool() {
val.SetFloat(1)
@@ -681,7 +686,11 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
// Compile the list of all the fields that we're going to be decoding
// from all the structs.
- fields := make(map[*reflect.StructField]reflect.Value)
+ type field struct {
+ field reflect.StructField
+ val reflect.Value
+ }
+ fields := []field{}
for len(structs) > 0 {
structVal := structs[0]
structs = structs[1:]
@@ -713,14 +722,16 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
}
// Normal struct field, store it away
- fields[&fieldType] = structVal.Field(i)
+ fields = append(fields, field{fieldType, structVal.Field(i)})
}
}
- for fieldType, field := range fields {
- fieldName := fieldType.Name
+ // for fieldType, field := range fields {
+ for _, f := range fields {
+ field, fieldValue := f.field, f.val
+ fieldName := field.Name
- tagValue := fieldType.Tag.Get(d.config.TagName)
+ tagValue := field.Tag.Get(d.config.TagName)
tagValue = strings.SplitN(tagValue, ",", 2)[0]
if tagValue != "" {
fieldName = tagValue
@@ -755,14 +766,14 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
// Delete the key we're using from the unused map so we stop tracking
delete(dataValKeysUnused, rawMapKey.Interface())
- if !field.IsValid() {
+ if !fieldValue.IsValid() {
// This should never happen
panic("field is not valid")
}
// If we can't set the field, then it is unexported or something,
// and we just continue onwards.
- if !field.CanSet() {
+ if !fieldValue.CanSet() {
continue
}
@@ -772,7 +783,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value)
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
}
- if err := d.decode(fieldName, rawMapVal.Interface(), field); err != nil {
+ if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
errors = appendErrors(errors, err)
}
}
diff --git a/vendor/github.com/pelletier/go-toml/README.md b/vendor/github.com/pelletier/go-toml/README.md
index b8137e0..2681690 100644
--- a/vendor/github.com/pelletier/go-toml/README.md
+++ b/vendor/github.com/pelletier/go-toml/README.md
@@ -16,65 +16,64 @@ This library supports TOML version
Go-toml provides the following features for using data parsed from TOML documents:
* Load TOML documents from files and string data
-* Easily navigate TOML structure using TomlTree
+* Easily navigate TOML structure using Tree
+* Marshaling and unmarshaling to and from data structures
* Line & column position data for all parsed elements
-* Query support similar to JSON-Path
+* [Query support similar to JSON-Path](query/)
* Syntax errors contain line and column numbers
-Go-toml is designed to help cover use-cases not covered by reflection-based TOML parsing:
+## Import
-* Semantic evaluation of parsed TOML
-* Informing a user of mistakes in the source document, after it has been parsed
-* Programatic handling of default values on a case-by-case basis
-* Using a TOML document as a flexible data-store
+```go
+import "github.com/pelletier/go-toml"
+```
-## Import
+## Usage example
- import "github.com/pelletier/go-toml"
+Read a TOML document:
-## Usage
+```go
+config, _ := toml.Load(`
+[postgres]
+user = "pelletier"
+password = "mypassword"`)
+// retrieve data directly
+user := config.Get("postgres.user").(string)
-### Example
+// or using an intermediate object
+postgresConfig := config.Get("postgres").(*toml.Tree)
+password := postgresConfig.Get("password").(string)
+```
+
+Or use Unmarshal:
-Say you have a TOML file that looks like this:
+```go
+type Postgres struct {
+ User string
+ Password string
+}
+type Config struct {
+ Postgres Postgres
+}
-```toml
+doc := []byte(`
[postgres]
user = "pelletier"
-password = "mypassword"
+password = "mypassword"`)
+
+config := Config{}
+toml.Unmarshal(doc, &config)
+fmt.Println("user=", config.Postgres.User)
```
-Read the username and password like this:
+Or use a query:
```go
-import (
- "fmt"
- "github.com/pelletier/go-toml"
-)
-
-config, err := toml.LoadFile("config.toml")
-if err != nil {
- fmt.Println("Error ", err.Error())
-} else {
- // retrieve data directly
- user := config.Get("postgres.user").(string)
- password := config.Get("postgres.password").(string)
-
- // or using an intermediate object
- configTree := config.Get("postgres").(*toml.TomlTree)
- user = configTree.Get("user").(string)
- password = configTree.Get("password").(string)
- fmt.Println("User is ", user, ". Password is ", password)
-
- // show where elements are in the file
- fmt.Println("User position: %v", configTree.GetPosition("user"))
- fmt.Println("Password position: %v", configTree.GetPosition("password"))
-
- // use a query to gather elements without walking the tree
- results, _ := config.Query("$..[user,password]")
- for ii, item := range results.Values() {
- fmt.Println("Query result %d: %v", ii, item)
- }
+// use a query to gather elements without walking the tree
+q, _ := query.Compile("$..[user,password]")
+results := q.Execute(config)
+for ii, item := range results.Values() {
+ fmt.Println("Query result %d: %v", ii, item)
}
```
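Note that the query example above relies on the separate query subpackage referenced earlier in this README; a likely import block for that snippet, with paths taken from this change's documentation, would be:

```go
import (
	"github.com/pelletier/go-toml"
	"github.com/pelletier/go-toml/query"
)
```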
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.json b/vendor/github.com/pelletier/go-toml/benchmark.json
new file mode 100644
index 0000000..86f99c6
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.json
@@ -0,0 +1,164 @@
+{
+ "array": {
+ "key1": [
+ 1,
+ 2,
+ 3
+ ],
+ "key2": [
+ "red",
+ "yellow",
+ "green"
+ ],
+ "key3": [
+ [
+ 1,
+ 2
+ ],
+ [
+ 3,
+ 4,
+ 5
+ ]
+ ],
+ "key4": [
+ [
+ 1,
+ 2
+ ],
+ [
+ "a",
+ "b",
+ "c"
+ ]
+ ],
+ "key5": [
+ 1,
+ 2,
+ 3
+ ],
+ "key6": [
+ 1,
+ 2
+ ]
+ },
+ "boolean": {
+ "False": false,
+ "True": true
+ },
+ "datetime": {
+ "key1": "1979-05-27T07:32:00Z",
+ "key2": "1979-05-27T00:32:00-07:00",
+ "key3": "1979-05-27T00:32:00.999999-07:00"
+ },
+ "float": {
+ "both": {
+ "key": 6.626e-34
+ },
+ "exponent": {
+ "key1": 5e+22,
+ "key2": 1000000,
+ "key3": -0.02
+ },
+ "fractional": {
+ "key1": 1,
+ "key2": 3.1415,
+ "key3": -0.01
+ },
+ "underscores": {
+ "key1": 9224617.445991227,
+ "key2": 1e+100
+ }
+ },
+ "fruit": [{
+ "name": "apple",
+ "physical": {
+ "color": "red",
+ "shape": "round"
+ },
+ "variety": [{
+ "name": "red delicious"
+ },
+ {
+ "name": "granny smith"
+ }
+ ]
+ },
+ {
+ "name": "banana",
+ "variety": [{
+ "name": "plantain"
+ }]
+ }
+ ],
+ "integer": {
+ "key1": 99,
+ "key2": 42,
+ "key3": 0,
+ "key4": -17,
+ "underscores": {
+ "key1": 1000,
+ "key2": 5349221,
+ "key3": 12345
+ }
+ },
+ "products": [{
+ "name": "Hammer",
+ "sku": 738594937
+ },
+ {},
+ {
+ "color": "gray",
+ "name": "Nail",
+ "sku": 284758393
+ }
+ ],
+ "string": {
+ "basic": {
+ "basic": "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
+ },
+ "literal": {
+ "multiline": {
+ "lines": "The first newline is\ntrimmed in raw strings.\n All other whitespace\n is preserved.\n",
+ "regex2": "I [dw]on't need \\d{2} apples"
+ },
+ "quoted": "Tom \"Dubs\" Preston-Werner",
+ "regex": "\u003c\\i\\c*\\s*\u003e",
+ "winpath": "C:\\Users\\nodejs\\templates",
+ "winpath2": "\\\\ServerX\\admin$\\system32\\"
+ },
+ "multiline": {
+ "continued": {
+ "key1": "The quick brown fox jumps over the lazy dog.",
+ "key2": "The quick brown fox jumps over the lazy dog.",
+ "key3": "The quick brown fox jumps over the lazy dog."
+ },
+ "key1": "One\nTwo",
+ "key2": "One\nTwo",
+ "key3": "One\nTwo"
+ }
+ },
+ "table": {
+ "inline": {
+ "name": {
+ "first": "Tom",
+ "last": "Preston-Werner"
+ },
+ "point": {
+ "x": 1,
+ "y": 2
+ }
+ },
+ "key": "value",
+ "subtable": {
+ "key": "another value"
+ }
+ },
+ "x": {
+ "y": {
+ "z": {
+ "w": {}
+ }
+ }
+ }
+}
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.sh b/vendor/github.com/pelletier/go-toml/benchmark.sh
new file mode 100755
index 0000000..8b8bb52
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+
+set -e
+
+reference_ref=${1:-master}
+reference_git=${2:-.}
+
+if ! `hash benchstat 2>/dev/null`; then
+ echo "Installing benchstat"
+ go get golang.org/x/perf/cmd/benchstat
+ go install golang.org/x/perf/cmd/benchstat
+fi
+
+tempdir=`mktemp -d /tmp/go-toml-benchmark-XXXXXX`
+ref_tempdir="${tempdir}/ref"
+ref_benchmark="${ref_tempdir}/benchmark-`echo -n ${reference_ref}|tr -s '/' '-'`.txt"
+local_benchmark="`pwd`/benchmark-local.txt"
+
+echo "=== ${reference_ref} (${ref_tempdir})"
+git clone ${reference_git} ${ref_tempdir} >/dev/null 2>/dev/null
+pushd ${ref_tempdir} >/dev/null
+git checkout ${reference_ref} >/dev/null 2>/dev/null
+go test -bench=. -benchmem | tee ${ref_benchmark}
+popd >/dev/null
+
+echo ""
+echo "=== local"
+go test -bench=. -benchmem | tee ${local_benchmark}
+
+echo ""
+echo "=== diff"
+benchstat -delta-test=none ${ref_benchmark} ${local_benchmark}
\ No newline at end of file
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.toml b/vendor/github.com/pelletier/go-toml/benchmark.toml
new file mode 100644
index 0000000..dfd77e0
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.toml
@@ -0,0 +1,244 @@
+################################################################################
+## Comment
+
+# Speak your mind with the hash symbol. They go from the symbol to the end of
+# the line.
+
+
+################################################################################
+## Table
+
+# Tables (also known as hash tables or dictionaries) are collections of
+# key/value pairs. They appear in square brackets on a line by themselves.
+
+[table]
+
+key = "value" # Yeah, you can do this.
+
+# Nested tables are denoted by table names with dots in them. Name your tables
+# whatever crap you please, just don't use #, ., [ or ].
+
+[table.subtable]
+
+key = "another value"
+
+# You don't need to specify all the super-tables if you don't want to. TOML
+# knows how to do it for you.
+
+# [x] you
+# [x.y] don't
+# [x.y.z] need these
+[x.y.z.w] # for this to work
+
+
+################################################################################
+## Inline Table
+
+# Inline tables provide a more compact syntax for expressing tables. They are
+# especially useful for grouped data that can otherwise quickly become verbose.
+# Inline tables are enclosed in curly braces `{` and `}`. No newlines are
+# allowed between the curly braces unless they are valid within a value.
+
+[table.inline]
+
+name = { first = "Tom", last = "Preston-Werner" }
+point = { x = 1, y = 2 }
+
+
+################################################################################
+## String
+
+# There are four ways to express strings: basic, multi-line basic, literal, and
+# multi-line literal. All strings must contain only valid UTF-8 characters.
+
+[string.basic]
+
+basic = "I'm a string. \"You can quote me\". Name\tJos\u00E9\nLocation\tSF."
+
+[string.multiline]
+
+# The following strings are byte-for-byte equivalent:
+key1 = "One\nTwo"
+key2 = """One\nTwo"""
+key3 = """
+One
+Two"""
+
+[string.multiline.continued]
+
+# The following strings are byte-for-byte equivalent:
+key1 = "The quick brown fox jumps over the lazy dog."
+
+key2 = """
+The quick brown \
+
+
+ fox jumps over \
+ the lazy dog."""
+
+key3 = """\
+ The quick brown \
+ fox jumps over \
+ the lazy dog.\
+ """
+
+[string.literal]
+
+# What you see is what you get.
+winpath = 'C:\Users\nodejs\templates'
+winpath2 = '\\ServerX\admin$\system32\'
+quoted = 'Tom "Dubs" Preston-Werner'
+regex = '<\i\c*\s*>'
+
+
+[string.literal.multiline]
+
+regex2 = '''I [dw]on't need \d{2} apples'''
+lines = '''
+The first newline is
+trimmed in raw strings.
+ All other whitespace
+ is preserved.
+'''
+
+
+################################################################################
+## Integer
+
+# Integers are whole numbers. Positive numbers may be prefixed with a plus sign.
+# Negative numbers are prefixed with a minus sign.
+
+[integer]
+
+key1 = +99
+key2 = 42
+key3 = 0
+key4 = -17
+
+[integer.underscores]
+
+# For large numbers, you may use underscores to enhance readability. Each
+# underscore must be surrounded by at least one digit.
+key1 = 1_000
+key2 = 5_349_221
+key3 = 1_2_3_4_5 # valid but inadvisable
+
+
+################################################################################
+## Float
+
+# A float consists of an integer part (which may be prefixed with a plus or
+# minus sign) followed by a fractional part and/or an exponent part.
+
+[float.fractional]
+
+key1 = +1.0
+key2 = 3.1415
+key3 = -0.01
+
+[float.exponent]
+
+key1 = 5e+22
+key2 = 1e6
+key3 = -2E-2
+
+[float.both]
+
+key = 6.626e-34
+
+[float.underscores]
+
+key1 = 9_224_617.445_991_228_313
+key2 = 1e1_00
+
+
+################################################################################
+## Boolean
+
+# Booleans are just the tokens you're used to. Always lowercase.
+
+[boolean]
+
+True = true
+False = false
+
+
+################################################################################
+## Datetime
+
+# Datetimes are RFC 3339 dates.
+
+[datetime]
+
+key1 = 1979-05-27T07:32:00Z
+key2 = 1979-05-27T00:32:00-07:00
+key3 = 1979-05-27T00:32:00.999999-07:00
+
+
+################################################################################
+## Array
+
+# Arrays are square brackets with other primitives inside. Whitespace is
+# ignored. Elements are separated by commas. Data types may not be mixed.
+
+[array]
+
+key1 = [ 1, 2, 3 ]
+key2 = [ "red", "yellow", "green" ]
+key3 = [ [ 1, 2 ], [3, 4, 5] ]
+#key4 = [ [ 1, 2 ], ["a", "b", "c"] ] # this is ok
+
+# Arrays can also be multiline. So in addition to ignoring whitespace, arrays
+# also ignore newlines between the brackets. Terminating commas are ok before
+# the closing bracket.
+
+key5 = [
+ 1, 2, 3
+]
+key6 = [
+ 1,
+ 2, # this is ok
+]
+
+
+################################################################################
+## Array of Tables
+
+# These can be expressed by using a table name in double brackets. Each table
+# with the same double bracketed name will be an element in the array. The
+# tables are inserted in the order encountered.
+
+[[products]]
+
+name = "Hammer"
+sku = 738594937
+
+[[products]]
+
+[[products]]
+
+name = "Nail"
+sku = 284758393
+color = "gray"
+
+
+# You can create nested arrays of tables as well.
+
+[[fruit]]
+ name = "apple"
+
+ [fruit.physical]
+ color = "red"
+ shape = "round"
+
+ [[fruit.variety]]
+ name = "red delicious"
+
+ [[fruit.variety]]
+ name = "granny smith"
+
+[[fruit]]
+ name = "banana"
+
+ [[fruit.variety]]
+ name = "plantain"
diff --git a/vendor/github.com/pelletier/go-toml/benchmark.yml b/vendor/github.com/pelletier/go-toml/benchmark.yml
new file mode 100644
index 0000000..0bd19f0
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/benchmark.yml
@@ -0,0 +1,121 @@
+---
+array:
+ key1:
+ - 1
+ - 2
+ - 3
+ key2:
+ - red
+ - yellow
+ - green
+ key3:
+ - - 1
+ - 2
+ - - 3
+ - 4
+ - 5
+ key4:
+ - - 1
+ - 2
+ - - a
+ - b
+ - c
+ key5:
+ - 1
+ - 2
+ - 3
+ key6:
+ - 1
+ - 2
+boolean:
+ 'False': false
+ 'True': true
+datetime:
+ key1: '1979-05-27T07:32:00Z'
+ key2: '1979-05-27T00:32:00-07:00'
+ key3: '1979-05-27T00:32:00.999999-07:00'
+float:
+ both:
+ key: 6.626e-34
+ exponent:
+ key1: 5.0e+22
+ key2: 1000000
+ key3: -0.02
+ fractional:
+ key1: 1
+ key2: 3.1415
+ key3: -0.01
+ underscores:
+ key1: 9224617.445991227
+ key2: 1.0e+100
+fruit:
+- name: apple
+ physical:
+ color: red
+ shape: round
+ variety:
+ - name: red delicious
+ - name: granny smith
+- name: banana
+ variety:
+ - name: plantain
+integer:
+ key1: 99
+ key2: 42
+ key3: 0
+ key4: -17
+ underscores:
+ key1: 1000
+ key2: 5349221
+ key3: 12345
+products:
+- name: Hammer
+ sku: 738594937
+- {}
+- color: gray
+ name: Nail
+ sku: 284758393
+string:
+ basic:
+ basic: "I'm a string. \"You can quote me\". Name\tJosé\nLocation\tSF."
+ literal:
+ multiline:
+ lines: |
+ The first newline is
+ trimmed in raw strings.
+ All other whitespace
+ is preserved.
+ regex2: I [dw]on't need \d{2} apples
+ quoted: Tom "Dubs" Preston-Werner
+ regex: "<\\i\\c*\\s*>"
+ winpath: C:\Users\nodejs\templates
+ winpath2: "\\\\ServerX\\admin$\\system32\\"
+ multiline:
+ continued:
+ key1: The quick brown fox jumps over the lazy dog.
+ key2: The quick brown fox jumps over the lazy dog.
+ key3: The quick brown fox jumps over the lazy dog.
+ key1: |-
+ One
+ Two
+ key2: |-
+ One
+ Two
+ key3: |-
+ One
+ Two
+table:
+ inline:
+ name:
+ first: Tom
+ last: Preston-Werner
+ point:
+ x: 1
+ y: 2
+ key: value
+ subtable:
+ key: another value
+x:
+ y:
+ z:
+ w: {}
diff --git a/vendor/github.com/pelletier/go-toml/clean.sh b/vendor/github.com/pelletier/go-toml/clean.sh
deleted file mode 100755
index 44d49d9..0000000
--- a/vendor/github.com/pelletier/go-toml/clean.sh
+++ /dev/null
@@ -1,6 +0,0 @@
-#!/bin/bash
-# fail out of the script if anything here fails
-set -e
-
-# clear out stuff generated by test.sh
-rm -rf src test_program_bin toml-test
diff --git a/vendor/github.com/pelletier/go-toml/doc.go b/vendor/github.com/pelletier/go-toml/doc.go
index 9156b73..3c89619 100644
--- a/vendor/github.com/pelletier/go-toml/doc.go
+++ b/vendor/github.com/pelletier/go-toml/doc.go
@@ -1,250 +1,23 @@
-// Package toml is a TOML markup language parser.
+// Package toml is a TOML parser and manipulation library.
//
// This version supports the specification as described in
// https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md
//
-// TOML Parsing
+// Marshaling
//
-// TOML data may be parsed in two ways: by file, or by string.
+// Go-toml can marshal and unmarshal TOML documents from and to data
+// structures.
//
-// // load TOML data by filename
-// tree, err := toml.LoadFile("filename.toml")
+// TOML document as a tree
//
-// // load TOML data stored in a string
-// tree, err := toml.Load(stringContainingTomlData)
+// Go-toml can operate on a TOML document as a tree. Use one of the Load*
+// functions to parse TOML data and obtain a Tree instance, then one of its
+// methods to manipulate the tree.
//
-// Either way, the result is a TomlTree object that can be used to navigate the
-// structure and data within the original document.
+// JSONPath-like queries
//
-//
-// Getting data from the TomlTree
-//
-// After parsing TOML data with Load() or LoadFile(), use the Has() and Get()
-// methods on the returned TomlTree, to find your way through the document data.
-//
-// if tree.Has("foo") {
-// fmt.Println("foo is:", tree.Get("foo"))
-// }
-//
-// Working with Paths
-//
-// Go-toml has support for basic dot-separated key paths on the Has(), Get(), Set()
-// and GetDefault() methods. These are the same kind of key paths used within the
-// TOML specification for struct tames.
-//
-// // looks for a key named 'baz', within struct 'bar', within struct 'foo'
-// tree.Has("foo.bar.baz")
-//
-// // returns the key at this path, if it is there
-// tree.Get("foo.bar.baz")
-//
-// TOML allows keys to contain '.', which can cause this syntax to be problematic
-// for some documents. In such cases, use the GetPath(), HasPath(), and SetPath(),
-// methods to explicitly define the path. This form is also faster, since
-// it avoids having to parse the passed key for '.' delimiters.
-//
-// // looks for a key named 'baz', within struct 'bar', within struct 'foo'
-// tree.HasPath([]string{"foo","bar","baz"})
-//
-// // returns the key at this path, if it is there
-// tree.GetPath([]string{"foo","bar","baz"})
-//
-// Note that this is distinct from the heavyweight query syntax supported by
-// TomlTree.Query() and the Query() struct (see below).
-//
-// Position Support
-//
-// Each element within the TomlTree is stored with position metadata, which is
-// invaluable for providing semantic feedback to a user. This helps in
-// situations where the TOML file parses correctly, but contains data that is
-// not correct for the application. In such cases, an error message can be
-// generated that indicates the problem line and column number in the source
-// TOML document.
-//
-// // load TOML data
-// tree, _ := toml.Load("filename.toml")
-//
-// // get an entry and report an error if it's the wrong type
-// element := tree.Get("foo")
-// if value, ok := element.(int64); !ok {
-// return fmt.Errorf("%v: Element 'foo' must be an integer", tree.GetPosition("foo"))
-// }
-//
-// // report an error if an expected element is missing
-// if !tree.Has("bar") {
-// return fmt.Errorf("%v: Expected 'bar' element", tree.GetPosition(""))
-// }
-//
-// Query Support
-//
-// The TOML query path implementation is based loosely on the JSONPath specification:
-// http://goessner.net/articles/JsonPath/
-//
-// The idea behind a query path is to allow quick access to any element, or set
-// of elements within TOML document, with a single expression.
-//
-// result, err := tree.Query("$.foo.bar.baz")
-//
-// This is roughly equivalent to:
-//
-// next := tree.Get("foo")
-// if next != nil {
-// next = next.Get("bar")
-// if next != nil {
-// next = next.Get("baz")
-// }
-// }
-// result := next
-//
-// err is nil if any parsing exception occurs.
-//
-// If no node in the tree matches the query, result will simply contain an empty list of
-// items.
-//
-// As illustrated above, the query path is much more efficient, especially since
-// the structure of the TOML file can vary. Rather than making assumptions about
-// a document's structure, a query allows the programmer to make structured
-// requests into the document, and get zero or more values as a result.
-//
-// The syntax of a query begins with a root token, followed by any number
-// sub-expressions:
-//
-// $
-// Root of the TOML tree. This must always come first.
-// .name
-// Selects child of this node, where 'name' is a TOML key
-// name.
-// ['name']
-// Selects child of this node, where 'name' is a string
-// containing a TOML key name.
-// [index]
-// Selcts child array element at 'index'.
-// ..expr
-// Recursively selects all children, filtered by an a union,
-// index, or slice expression.
-// ..*
-// Recursive selection of all nodes at this point in the
-// tree.
-// .*
-// Selects all children of the current node.
-// [expr,expr]
-// Union operator - a logical 'or' grouping of two or more
-// sub-expressions: index, key name, or filter.
-// [start:end:step]
-// Slice operator - selects array elements from start to
-// end-1, at the given step. All three arguments are
-// optional.
-// [?(filter)]
-// Named filter expression - the function 'filter' is
-// used to filter children at this node.
-//
-// Query Indexes And Slices
-//
-// Index expressions perform no bounds checking, and will contribute no
-// values to the result set if the provided index or index range is invalid.
-// Negative indexes represent values from the end of the array, counting backwards.
-//
-// // select the last index of the array named 'foo'
-// tree.Query("$.foo[-1]")
-//
-// Slice expressions are supported, by using ':' to separate a start/end index pair.
-//
-// // select up to the first five elements in the array
-// tree.Query("$.foo[0:5]")
-//
-// Slice expressions also allow negative indexes for the start and stop
-// arguments.
-//
-// // select all array elements.
-// tree.Query("$.foo[0:-1]")
-//
-// Slice expressions may have an optional stride/step parameter:
-//
-// // select every other element
-// tree.Query("$.foo[0:-1:2]")
-//
-// Slice start and end parameters are also optional:
-//
-// // these are all equivalent and select all the values in the array
-// tree.Query("$.foo[:]")
-// tree.Query("$.foo[0:]")
-// tree.Query("$.foo[:-1]")
-// tree.Query("$.foo[0:-1:]")
-// tree.Query("$.foo[::1]")
-// tree.Query("$.foo[0::1]")
-// tree.Query("$.foo[:-1:1]")
-// tree.Query("$.foo[0:-1:1]")
-//
-// Query Filters
-//
-// Query filters are used within a Union [,] or single Filter [] expression.
-// A filter only allows nodes that qualify through to the next expression,
-// and/or into the result set.
-//
-// // returns children of foo that are permitted by the 'bar' filter.
-// tree.Query("$.foo[?(bar)]")
-//
-// There are several filters provided with the library:
-//
-// tree
-// Allows nodes of type TomlTree.
-// int
-// Allows nodes of type int64.
-// float
-// Allows nodes of type float64.
-// string
-// Allows nodes of type string.
-// time
-// Allows nodes of type time.Time.
-// bool
-// Allows nodes of type bool.
-//
-// Query Results
-//
-// An executed query returns a QueryResult object. This contains the nodes
-// in the TOML tree that qualify the query expression. Position information
-// is also available for each value in the set.
-//
-// // display the results of a query
-// results := tree.Query("$.foo.bar.baz")
-// for idx, value := results.Values() {
-// fmt.Println("%v: %v", results.Positions()[idx], value)
-// }
-//
-// Compiled Queries
-//
-// Queries may be executed directly on a TomlTree object, or compiled ahead
-// of time and executed discretely. The former is more convienent, but has the
-// penalty of having to recompile the query expression each time.
-//
-// // basic query
-// results := tree.Query("$.foo.bar.baz")
-//
-// // compiled query
-// query := toml.CompileQuery("$.foo.bar.baz")
-// results := query.Execute(tree)
-//
-// // run the compiled query again on a different tree
-// moreResults := query.Execute(anotherTree)
-//
-// User Defined Query Filters
-//
-// Filter expressions may also be user defined by using the SetFilter()
-// function on the Query object. The function must return true/false, which
-// signifies if the passed node is kept or discarded, respectively.
-//
-// // create a query that references a user-defined filter
-// query, _ := CompileQuery("$[?(bazOnly)]")
-//
-// // define the filter, and assign it to the query
-// query.SetFilter("bazOnly", func(node interface{}) bool{
-// if tree, ok := node.(*TomlTree); ok {
-// return tree.Has("baz")
-// }
-// return false // reject all other node types
-// })
-//
-// // run the query
-// query.Execute(tree)
+// The package github.com/pelletier/go-toml/query implements a system
+// similar to JSONPath to quickly retrieve elements of a TOML document using a
+// single expression. See the package documentation for more information.
//
package toml
diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go
index 104f3b1..1b6647d 100644
--- a/vendor/github.com/pelletier/go-toml/lexer.go
+++ b/vendor/github.com/pelletier/go-toml/lexer.go
@@ -6,14 +6,12 @@
package toml
import (
+ "bytes"
"errors"
"fmt"
- "io"
"regexp"
"strconv"
"strings"
-
- "github.com/pelletier/go-buffruneio"
)
var dateRegexp *regexp.Regexp
@@ -23,29 +21,29 @@ type tomlLexStateFn func() tomlLexStateFn
// Define lexer
type tomlLexer struct {
- input *buffruneio.Reader // Textual source
- buffer []rune // Runes composing the current token
- tokens chan token
- depth int
- line int
- col int
- endbufferLine int
- endbufferCol int
+ inputIdx int
+ input []rune // Textual source
+ currentTokenStart int
+ currentTokenStop int
+ tokens []token
+ depth int
+ line int
+ col int
+ endbufferLine int
+ endbufferCol int
}
// Basic read operations on input
func (l *tomlLexer) read() rune {
- r, _, err := l.input.ReadRune()
- if err != nil {
- panic(err)
- }
+ r := l.peek()
if r == '\n' {
l.endbufferLine++
l.endbufferCol = 1
} else {
l.endbufferCol++
}
+ l.inputIdx++
return r
}
@@ -53,13 +51,13 @@ func (l *tomlLexer) next() rune {
r := l.read()
if r != eof {
- l.buffer = append(l.buffer, r)
+ l.currentTokenStop++
}
return r
}
func (l *tomlLexer) ignore() {
- l.buffer = make([]rune, 0)
+ l.currentTokenStart = l.currentTokenStop
l.line = l.endbufferLine
l.col = l.endbufferCol
}
@@ -76,49 +74,46 @@ func (l *tomlLexer) fastForward(n int) {
}
func (l *tomlLexer) emitWithValue(t tokenType, value string) {
- l.tokens <- token{
+ l.tokens = append(l.tokens, token{
Position: Position{l.line, l.col},
typ: t,
val: value,
- }
+ })
l.ignore()
}
func (l *tomlLexer) emit(t tokenType) {
- l.emitWithValue(t, string(l.buffer))
+ l.emitWithValue(t, string(l.input[l.currentTokenStart:l.currentTokenStop]))
}
func (l *tomlLexer) peek() rune {
- r, _, err := l.input.ReadRune()
- if err != nil {
- panic(err)
+ if l.inputIdx >= len(l.input) {
+ return eof
}
- l.input.UnreadRune()
- return r
+ return l.input[l.inputIdx]
}
-func (l *tomlLexer) follow(next string) bool {
- for _, expectedRune := range next {
- r, _, err := l.input.ReadRune()
- defer l.input.UnreadRune()
- if err != nil {
- panic(err)
- }
- if expectedRune != r {
- return false
- }
+func (l *tomlLexer) peekString(size int) string {
+ maxIdx := len(l.input)
+ upperIdx := l.inputIdx + size // FIXME: potential overflow
+ if upperIdx > maxIdx {
+ upperIdx = maxIdx
}
- return true
+ return string(l.input[l.inputIdx:upperIdx])
+}
+
+func (l *tomlLexer) follow(next string) bool {
+ return next == l.peekString(len(next))
}
// Error management
func (l *tomlLexer) errorf(format string, args ...interface{}) tomlLexStateFn {
- l.tokens <- token{
+ l.tokens = append(l.tokens, token{
Position: Position{l.line, l.col},
typ: tokenError,
val: fmt.Sprintf(format, args...),
- }
+ })
return nil
}
@@ -219,7 +214,7 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
break
}
- possibleDate := string(l.input.PeekRunes(35))
+ possibleDate := l.peekString(35)
dateMatch := dateRegexp.FindString(possibleDate)
if dateMatch != "" {
l.fastForward(len(dateMatch))
@@ -536,7 +531,7 @@ func (l *tomlLexer) lexInsideTableArrayKey() tomlLexStateFn {
for r := l.peek(); r != eof; r = l.peek() {
switch r {
case ']':
- if len(l.buffer) > 0 {
+ if l.currentTokenStop > l.currentTokenStart {
l.emit(tokenKeyGroupArray)
}
l.next()
@@ -559,7 +554,7 @@ func (l *tomlLexer) lexInsideTableKey() tomlLexStateFn {
for r := l.peek(); r != eof; r = l.peek() {
switch r {
case ']':
- if len(l.buffer) > 0 {
+ if l.currentTokenStop > l.currentTokenStart {
l.emit(tokenKeyGroup)
}
l.next()
@@ -634,7 +629,6 @@ func (l *tomlLexer) run() {
for state := l.lexVoid; state != nil; {
state = state()
}
- close(l.tokens)
}
func init() {
@@ -642,16 +636,16 @@ func init() {
}
// Entry point
-func lexToml(input io.Reader) chan token {
- bufferedInput := buffruneio.NewReader(input)
+func lexToml(inputBytes []byte) []token {
+ runes := bytes.Runes(inputBytes)
l := &tomlLexer{
- input: bufferedInput,
- tokens: make(chan token),
+ input: runes,
+ tokens: make([]token, 0, 256),
line: 1,
col: 1,
endbufferLine: 1,
endbufferCol: 1,
}
- go l.run()
+ l.run()
return l.tokens
}
diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go
index a1d7010..ce3bd8d 100644
--- a/vendor/github.com/pelletier/go-toml/marshal.go
+++ b/vendor/github.com/pelletier/go-toml/marshal.go
@@ -1,33 +1,19 @@
package toml
import (
+ "bytes"
"errors"
"fmt"
"reflect"
+ "strconv"
"strings"
"time"
)
-/*
-TomlTree structural types and corresponding marshal types
--------------------------------------------------------------------------------
-*TomlTree (*)struct, (*)map[string]interface{}
-[]*TomlTree (*)[](*)struct, (*)[](*)map[string]interface{}
-[]interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{})
-interface{} (*)primitive
-
-TomlTree primitive types and corresponding marshal types
------------------------------------------------------------
-uint64 uint, uint8-uint64, pointers to same
-int64 int, int8-uint64, pointers to same
-float64 float32, float64, pointers to same
-string string, pointers to same
-bool bool, pointers to same
-time.Time time.Time{}, pointers to same
-*/
-
type tomlOpts struct {
name string
+ comment string
+ commented bool
include bool
omitempty bool
}
@@ -35,7 +21,7 @@ type tomlOpts struct {
var timeType = reflect.TypeOf(time.Time{})
var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
-// Check if the given marshall type maps to a TomlTree primitive
+// Check if the given marshall type maps to a Tree primitive
func isPrimitive(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Ptr:
@@ -57,7 +43,7 @@ func isPrimitive(mtype reflect.Type) bool {
}
}
-// Check if the given marshall type maps to a TomlTree slice
+// Check if the given marshall type maps to a Tree slice
func isTreeSlice(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Slice:
@@ -67,7 +53,7 @@ func isTreeSlice(mtype reflect.Type) bool {
}
}
-// Check if the given marshall type maps to a non-TomlTree slice
+// Check if the given marshall type maps to a non-Tree slice
func isOtherSlice(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Ptr:
@@ -79,7 +65,7 @@ func isOtherSlice(mtype reflect.Type) bool {
}
}
-// Check if the given marshall type maps to a TomlTree
+// Check if the given marshall type maps to a Tree
func isTree(mtype reflect.Type) bool {
switch mtype.Kind() {
case reflect.Map:
@@ -111,9 +97,32 @@ encoder, except that there is no concept of a Marshaler interface or MarshalTOML
function for sub-structs, and currently only definite types can be marshaled
(i.e. no `interface{}`).
+The following struct annotations are supported:
+
+ toml:"Field" Overrides the field's name to output.
+ omitempty When set, empty values and groups are not emitted.
+ comment:"comment" Emits a # comment on the same line. This supports new lines.
+ commented:"true" Emits the value as commented.
+
Note that pointers are automatically assigned the "omitempty" option, as TOML
explicitly does not handle null values (saying instead the label should be
dropped).
+
+Tree structural types and corresponding marshal types:
+
+ *Tree (*)struct, (*)map[string]interface{}
+ []*Tree (*)[](*)struct, (*)[](*)map[string]interface{}
+ []interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{})
+ interface{} (*)primitive
+
+Tree primitive types and corresponding marshal types:
+
+ uint64 uint, uint8-uint64, pointers to same
+ int64 int, int8-uint64, pointers to same
+ float64 float32, float64, pointers to same
+ string string, pointers to same
+ bool bool, pointers to same
+ time.Time time.Time{}, pointers to same
*/
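A short, illustrative sketch of the annotations listed above on a struct passed to Marshal; the field names and values are made up:

```go
package main

import (
	"fmt"
	"log"

	"github.com/pelletier/go-toml"
)

type Server struct {
	Host    string `toml:"host" comment:"address the server binds to"`
	Port    int    `toml:"port"`
	Debug   bool   `toml:"debug" commented:"true"`   // emitted as a commented-out line
	AltHost string `toml:"alt_host,omitempty"`       // dropped from the output when empty
}

func main() {
	out, err := toml.Marshal(Server{Host: "127.0.0.1", Port: 8080})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out))
}
```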
func Marshal(v interface{}) ([]byte, error) {
mtype := reflect.TypeOf(v)
@@ -133,11 +142,11 @@ func Marshal(v interface{}) ([]byte, error) {
}
// Convert given marshal struct or map value to toml tree
-func valueToTree(mtype reflect.Type, mval reflect.Value) (*TomlTree, error) {
+func valueToTree(mtype reflect.Type, mval reflect.Value) (*Tree, error) {
if mtype.Kind() == reflect.Ptr {
return valueToTree(mtype.Elem(), mval.Elem())
}
- tval := newTomlTree()
+ tval := newTree()
switch mtype.Kind() {
case reflect.Struct:
for i := 0; i < mtype.NumField(); i++ {
@@ -148,7 +157,7 @@ func valueToTree(mtype reflect.Type, mval reflect.Value) (*TomlTree, error) {
if err != nil {
return nil, err
}
- tval.Set(opts.name, val)
+ tval.Set(opts.name, opts.comment, opts.commented, val)
}
}
case reflect.Map:
@@ -158,15 +167,15 @@ func valueToTree(mtype reflect.Type, mval reflect.Value) (*TomlTree, error) {
if err != nil {
return nil, err
}
- tval.Set(key.String(), val)
+ tval.Set(key.String(), "", false, val)
}
}
return tval, nil
}
// Convert given marshal slice to slice of Toml trees
-func valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*TomlTree, error) {
- tval := make([]*TomlTree, mval.Len(), mval.Len())
+func valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*Tree, error) {
+ tval := make([]*Tree, mval.Len(), mval.Len())
for i := 0; i < mval.Len(); i++ {
val, err := valueToTree(mtype.Elem(), mval.Index(i))
if err != nil {
@@ -224,34 +233,44 @@ func valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
}
}
-/*
-Unmarshal parses the TOML-encoded data and stores the result in the value
-pointed to by v. Behavior is similar to the Go json encoder, except that there
-is no concept of an Unmarshaler interface or UnmarshalTOML function for
-sub-structs, and currently only definite types can be unmarshaled to (i.e. no
-`interface{}`).
-*/
-func Unmarshal(data []byte, v interface{}) error {
+// Unmarshal attempts to unmarshal the Tree into a Go struct pointed to by v.
+// Neither Unmarshaler interfaces nor UnmarshalTOML functions are supported for
+// sub-structs, and only definite types can be unmarshaled.
+func (t *Tree) Unmarshal(v interface{}) error {
mtype := reflect.TypeOf(v)
if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct {
return errors.New("Only a pointer to struct can be unmarshaled from TOML")
}
- t, err := Load(string(data))
+ sval, err := valueFromTree(mtype.Elem(), t)
if err != nil {
return err
}
+ reflect.ValueOf(v).Elem().Set(sval)
+ return nil
+}
- sval, err := valueFromTree(mtype.Elem(), t)
+// Unmarshal parses the TOML-encoded data and stores the result in the value
+// pointed to by v. Behavior is similar to the Go json encoder, except that there
+// is no concept of an Unmarshaler interface or UnmarshalTOML function for
+// sub-structs, and currently only definite types can be unmarshaled to (i.e. no
+// `interface{}`).
+//
+// The following struct annotations are supported:
+//
+// toml:"Field" Overrides the field's name to map to.
+//
+// See Marshal() documentation for types mapping table.
+func Unmarshal(data []byte, v interface{}) error {
+ t, err := LoadReader(bytes.NewReader(data))
if err != nil {
return err
}
- reflect.ValueOf(v).Elem().Set(sval)
- return nil
+ return t.Unmarshal(v)
}
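A minimal sketch of the new (*Tree).Unmarshal method introduced here, decoding a parsed tree into a struct; the Config type and TOML snippet are illustrative:

```go
package main

import (
	"fmt"
	"log"

	"github.com/pelletier/go-toml"
)

type Config struct {
	Title string `toml:"title"`
	Port  int    `toml:"port"`
}

func main() {
	// Parse once into a Tree, inspect it, then decode it into a struct using
	// the (*Tree).Unmarshal method added in this change.
	tree, err := toml.Load("title = \"demo\"\nport = 8080\n")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tree.Has("port")) // true

	var cfg Config
	if err := tree.Unmarshal(&cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg) // {Title:demo Port:8080}
}
```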
// Convert toml tree to marshal struct or map, using marshal type
-func valueFromTree(mtype reflect.Type, tval *TomlTree) (reflect.Value, error) {
+func valueFromTree(mtype reflect.Type, tval *Tree) (reflect.Value, error) {
if mtype.Kind() == reflect.Ptr {
return unwrapPointer(mtype, tval)
}
@@ -263,15 +282,20 @@ func valueFromTree(mtype reflect.Type, tval *TomlTree) (reflect.Value, error) {
mtypef := mtype.Field(i)
opts := tomlOptions(mtypef)
if opts.include {
- key := opts.name
- exists := tval.Has(key)
- if exists {
+ baseKey := opts.name
+ keysToTry := []string{baseKey, strings.ToLower(baseKey), strings.ToTitle(baseKey)}
+ for _, key := range keysToTry {
+ exists := tval.Has(key)
+ if !exists {
+ continue
+ }
val := tval.Get(key)
mvalf, err := valueFromToml(mtypef.Type, val)
if err != nil {
return mval, formatError(err, tval.GetPosition(key))
}
mval.Field(i).Set(mvalf)
+ break
}
}
}
@@ -290,7 +314,7 @@ func valueFromTree(mtype reflect.Type, tval *TomlTree) (reflect.Value, error) {
}
// Convert toml value to marshal struct/map slice, using marshal type
-func valueFromTreeSlice(mtype reflect.Type, tval []*TomlTree) (reflect.Value, error) {
+func valueFromTreeSlice(mtype reflect.Type, tval []*Tree) (reflect.Value, error) {
mval := reflect.MakeSlice(mtype, len(tval), len(tval))
for i := 0; i < len(tval); i++ {
val, err := valueFromTree(mtype.Elem(), tval[i])
@@ -322,9 +346,9 @@ func valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error)
}
switch {
case isTree(mtype):
- return valueFromTree(mtype, tval.(*TomlTree))
+ return valueFromTree(mtype, tval.(*Tree))
case isTreeSlice(mtype):
- return valueFromTreeSlice(mtype, tval.([]*TomlTree))
+ return valueFromTreeSlice(mtype, tval.([]*Tree))
case isOtherSlice(mtype):
return valueFromOtherSlice(mtype, tval.([]interface{}))
default:
@@ -438,7 +462,12 @@ func unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error)
func tomlOptions(vf reflect.StructField) tomlOpts {
tag := vf.Tag.Get("toml")
parse := strings.Split(tag, ",")
- result := tomlOpts{vf.Name, true, false}
+ var comment string
+ if c := vf.Tag.Get("comment"); c != "" {
+ comment = c
+ }
+ commented, _ := strconv.ParseBool(vf.Tag.Get("commented"))
+ result := tomlOpts{name: vf.Name, comment: comment, commented: commented, include: true, omitempty: false}
if parse[0] != "" {
if parse[0] == "-" && len(parse) == 1 {
result.include = false
diff --git a/vendor/github.com/pelletier/go-toml/match.go b/vendor/github.com/pelletier/go-toml/match.go
deleted file mode 100644
index 48b0f2a..0000000
--- a/vendor/github.com/pelletier/go-toml/match.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package toml
-
-import (
- "fmt"
-)
-
-// support function to set positions for tomlValues
-// NOTE: this is done to allow ctx.lastPosition to indicate the start of any
-// values returned by the query engines
-func tomlValueCheck(node interface{}, ctx *queryContext) interface{} {
- switch castNode := node.(type) {
- case *tomlValue:
- ctx.lastPosition = castNode.position
- return castNode.value
- case []*TomlTree:
- if len(castNode) > 0 {
- ctx.lastPosition = castNode[0].position
- }
- return node
- default:
- return node
- }
-}
-
-// base match
-type matchBase struct {
- next pathFn
-}
-
-func (f *matchBase) setNext(next pathFn) {
- f.next = next
-}
-
-// terminating functor - gathers results
-type terminatingFn struct {
- // empty
-}
-
-func newTerminatingFn() *terminatingFn {
- return &terminatingFn{}
-}
-
-func (f *terminatingFn) setNext(next pathFn) {
- // do nothing
-}
-
-func (f *terminatingFn) call(node interface{}, ctx *queryContext) {
- switch castNode := node.(type) {
- case *TomlTree:
- ctx.result.appendResult(node, castNode.position)
- case *tomlValue:
- ctx.result.appendResult(node, castNode.position)
- default:
- // use last position for scalars
- ctx.result.appendResult(node, ctx.lastPosition)
- }
-}
-
-// match single key
-type matchKeyFn struct {
- matchBase
- Name string
-}
-
-func newMatchKeyFn(name string) *matchKeyFn {
- return &matchKeyFn{Name: name}
-}
-
-func (f *matchKeyFn) call(node interface{}, ctx *queryContext) {
- if array, ok := node.([]*TomlTree); ok {
- for _, tree := range array {
- item := tree.values[f.Name]
- if item != nil {
- f.next.call(item, ctx)
- }
- }
- } else if tree, ok := node.(*TomlTree); ok {
- item := tree.values[f.Name]
- if item != nil {
- f.next.call(item, ctx)
- }
- }
-}
-
-// match single index
-type matchIndexFn struct {
- matchBase
- Idx int
-}
-
-func newMatchIndexFn(idx int) *matchIndexFn {
- return &matchIndexFn{Idx: idx}
-}
-
-func (f *matchIndexFn) call(node interface{}, ctx *queryContext) {
- if arr, ok := tomlValueCheck(node, ctx).([]interface{}); ok {
- if f.Idx < len(arr) && f.Idx >= 0 {
- f.next.call(arr[f.Idx], ctx)
- }
- }
-}
-
-// filter by slicing
-type matchSliceFn struct {
- matchBase
- Start, End, Step int
-}
-
-func newMatchSliceFn(start, end, step int) *matchSliceFn {
- return &matchSliceFn{Start: start, End: end, Step: step}
-}
-
-func (f *matchSliceFn) call(node interface{}, ctx *queryContext) {
- if arr, ok := tomlValueCheck(node, ctx).([]interface{}); ok {
- // adjust indexes for negative values, reverse ordering
- realStart, realEnd := f.Start, f.End
- if realStart < 0 {
- realStart = len(arr) + realStart
- }
- if realEnd < 0 {
- realEnd = len(arr) + realEnd
- }
- if realEnd < realStart {
- realEnd, realStart = realStart, realEnd // swap
- }
- // loop and gather
- for idx := realStart; idx < realEnd; idx += f.Step {
- f.next.call(arr[idx], ctx)
- }
- }
-}
-
-// match anything
-type matchAnyFn struct {
- matchBase
-}
-
-func newMatchAnyFn() *matchAnyFn {
- return &matchAnyFn{}
-}
-
-func (f *matchAnyFn) call(node interface{}, ctx *queryContext) {
- if tree, ok := node.(*TomlTree); ok {
- for _, v := range tree.values {
- f.next.call(v, ctx)
- }
- }
-}
-
-// filter through union
-type matchUnionFn struct {
- Union []pathFn
-}
-
-func (f *matchUnionFn) setNext(next pathFn) {
- for _, fn := range f.Union {
- fn.setNext(next)
- }
-}
-
-func (f *matchUnionFn) call(node interface{}, ctx *queryContext) {
- for _, fn := range f.Union {
- fn.call(node, ctx)
- }
-}
-
-// match every single last node in the tree
-type matchRecursiveFn struct {
- matchBase
-}
-
-func newMatchRecursiveFn() *matchRecursiveFn {
- return &matchRecursiveFn{}
-}
-
-func (f *matchRecursiveFn) call(node interface{}, ctx *queryContext) {
- if tree, ok := node.(*TomlTree); ok {
- var visit func(tree *TomlTree)
- visit = func(tree *TomlTree) {
- for _, v := range tree.values {
- f.next.call(v, ctx)
- switch node := v.(type) {
- case *TomlTree:
- visit(node)
- case []*TomlTree:
- for _, subtree := range node {
- visit(subtree)
- }
- }
- }
- }
- f.next.call(tree, ctx)
- visit(tree)
- }
-}
-
-// match based on an externally provided functional filter
-type matchFilterFn struct {
- matchBase
- Pos Position
- Name string
-}
-
-func newMatchFilterFn(name string, pos Position) *matchFilterFn {
- return &matchFilterFn{Name: name, Pos: pos}
-}
-
-func (f *matchFilterFn) call(node interface{}, ctx *queryContext) {
- fn, ok := (*ctx.filters)[f.Name]
- if !ok {
- panic(fmt.Sprintf("%s: query context does not have filter '%s'",
- f.Pos.String(), f.Name))
- }
- switch castNode := tomlValueCheck(node, ctx).(type) {
- case *TomlTree:
- for _, v := range castNode.values {
- if tv, ok := v.(*tomlValue); ok {
- if fn(tv.value) {
- f.next.call(v, ctx)
- }
- } else {
- if fn(v) {
- f.next.call(v, ctx)
- }
- }
- }
- case []interface{}:
- for _, v := range castNode {
- if fn(v) {
- f.next.call(v, ctx)
- }
- }
- }
-}
diff --git a/vendor/github.com/pelletier/go-toml/parser.go b/vendor/github.com/pelletier/go-toml/parser.go
index 20e90a3..d492a1e 100644
--- a/vendor/github.com/pelletier/go-toml/parser.go
+++ b/vendor/github.com/pelletier/go-toml/parser.go
@@ -13,9 +13,9 @@ import (
)
type tomlParser struct {
- flow chan token
- tree *TomlTree
- tokensBuffer []token
+ flowIdx int
+ flow []token
+ tree *Tree
currentTable []string
seenTableKeys []string
}
@@ -34,16 +34,10 @@ func (p *tomlParser) run() {
}
func (p *tomlParser) peek() *token {
- if len(p.tokensBuffer) != 0 {
- return &(p.tokensBuffer[0])
- }
-
- tok, ok := <-p.flow
- if !ok {
+ if p.flowIdx >= len(p.flow) {
return nil
}
- p.tokensBuffer = append(p.tokensBuffer, tok)
- return &tok
+ return &p.flow[p.flowIdx]
}
func (p *tomlParser) assume(typ tokenType) {
@@ -57,16 +51,12 @@ func (p *tomlParser) assume(typ tokenType) {
}
func (p *tomlParser) getToken() *token {
- if len(p.tokensBuffer) != 0 {
- tok := p.tokensBuffer[0]
- p.tokensBuffer = p.tokensBuffer[1:]
- return &tok
- }
- tok, ok := <-p.flow
- if !ok {
+ tok := p.peek()
+ if tok == nil {
return nil
}
- return &tok
+ p.flowIdx++
+ return tok
}
func (p *tomlParser) parseStart() tomlParserStateFn {
@@ -106,21 +96,21 @@ func (p *tomlParser) parseGroupArray() tomlParserStateFn {
}
p.tree.createSubTree(keys[:len(keys)-1], startToken.Position) // create parent entries
destTree := p.tree.GetPath(keys)
- var array []*TomlTree
+ var array []*Tree
if destTree == nil {
- array = make([]*TomlTree, 0)
- } else if target, ok := destTree.([]*TomlTree); ok && target != nil {
- array = destTree.([]*TomlTree)
+ array = make([]*Tree, 0)
+ } else if target, ok := destTree.([]*Tree); ok && target != nil {
+ array = destTree.([]*Tree)
} else {
p.raiseError(key, "key %s is already assigned and not of type table array", key)
}
p.currentTable = keys
// add a new tree to the end of the table array
- newTree := newTomlTree()
+ newTree := newTree()
newTree.position = startToken.Position
array = append(array, newTree)
- p.tree.SetPath(p.currentTable, array)
+ p.tree.SetPath(p.currentTable, "", false, array)
// remove all keys that were children of this table array
prefix := key.val + "."
@@ -183,11 +173,11 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
}
// find the table to assign, looking out for arrays of tables
- var targetNode *TomlTree
+ var targetNode *Tree
switch node := p.tree.GetPath(tableKey).(type) {
- case []*TomlTree:
+ case []*Tree:
targetNode = node[len(node)-1]
- case *TomlTree:
+ case *Tree:
targetNode = node
default:
p.raiseError(key, "Unknown table type for path: %s",
@@ -212,10 +202,10 @@ func (p *tomlParser) parseAssign() tomlParserStateFn {
var toInsert interface{}
switch value.(type) {
- case *TomlTree, []*TomlTree:
+ case *Tree, []*Tree:
toInsert = value
default:
- toInsert = &tomlValue{value, key.Position}
+ toInsert = &tomlValue{value: value, position: key.Position}
}
targetNode.values[keyVal] = toInsert
return p.parseStart
@@ -289,8 +279,8 @@ func tokenIsComma(t *token) bool {
return t != nil && t.typ == tokenComma
}
-func (p *tomlParser) parseInlineTable() *TomlTree {
- tree := newTomlTree()
+func (p *tomlParser) parseInlineTable() *Tree {
+ tree := newTree()
var previous *token
Loop:
for {
@@ -309,7 +299,7 @@ Loop:
key := p.getToken()
p.assume(tokenEqual)
value := p.parseRvalue()
- tree.Set(key.val, value)
+ tree.Set(key.val, "", false, value)
case tokenComma:
if previous == nil {
p.raiseError(follow, "inline table cannot start with a comma")
@@ -360,27 +350,27 @@ func (p *tomlParser) parseArray() interface{} {
p.getToken()
}
}
- // An array of TomlTrees is actually an array of inline
+ // An array of Trees is actually an array of inline
// tables, which is a shorthand for a table array. If the
- // array was not converted from []interface{} to []*TomlTree,
+ // array was not converted from []interface{} to []*Tree,
// the two notations would not be equivalent.
- if arrayType == reflect.TypeOf(newTomlTree()) {
- tomlArray := make([]*TomlTree, len(array))
+ if arrayType == reflect.TypeOf(newTree()) {
+ tomlArray := make([]*Tree, len(array))
for i, v := range array {
- tomlArray[i] = v.(*TomlTree)
+ tomlArray[i] = v.(*Tree)
}
return tomlArray
}
return array
}
-func parseToml(flow chan token) *TomlTree {
- result := newTomlTree()
+func parseToml(flow []token) *Tree {
+ result := newTree()
result.position = Position{1, 1}
parser := &tomlParser{
+ flowIdx: 0,
flow: flow,
tree: result,
- tokensBuffer: make([]token, 0),
currentTable: make([]string, 0),
seenTableKeys: make([]string, 0),
}
diff --git a/vendor/github.com/pelletier/go-toml/query.go b/vendor/github.com/pelletier/go-toml/query.go
deleted file mode 100644
index 307a1ec..0000000
--- a/vendor/github.com/pelletier/go-toml/query.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package toml
-
-import (
- "time"
-)
-
-// NodeFilterFn represents a user-defined filter function, for use with
-// Query.SetFilter().
-//
-// The return value of the function must indicate if 'node' is to be included
-// at this stage of the TOML path. Returning true will include the node, and
-// returning false will exclude it.
-//
-// NOTE: Care should be taken to write script callbacks such that they are safe
-// to use from multiple goroutines.
-type NodeFilterFn func(node interface{}) bool
-
-// QueryResult is the result of Executing a Query.
-type QueryResult struct {
- items []interface{}
- positions []Position
-}
-
-// appends a value/position pair to the result set.
-func (r *QueryResult) appendResult(node interface{}, pos Position) {
- r.items = append(r.items, node)
- r.positions = append(r.positions, pos)
-}
-
-// Values is a set of values within a QueryResult. The order of values is not
-// guaranteed to be in document order, and may be different each time a query is
-// executed.
-func (r QueryResult) Values() []interface{} {
- values := make([]interface{}, len(r.items))
- for i, v := range r.items {
- o, ok := v.(*tomlValue)
- if ok {
- values[i] = o.value
- } else {
- values[i] = v
- }
- }
- return values
-}
-
-// Positions is a set of positions for values within a QueryResult. Each index
-// in Positions() corresponds to the entry in Value() of the same index.
-func (r QueryResult) Positions() []Position {
- return r.positions
-}
-
-// runtime context for executing query paths
-type queryContext struct {
- result *QueryResult
- filters *map[string]NodeFilterFn
- lastPosition Position
-}
-
-// generic path functor interface
-type pathFn interface {
- setNext(next pathFn)
- call(node interface{}, ctx *queryContext)
-}
-
-// A Query is the representation of a compiled TOML path. A Query is safe
-// for concurrent use by multiple goroutines.
-type Query struct {
- root pathFn
- tail pathFn
- filters *map[string]NodeFilterFn
-}
-
-func newQuery() *Query {
- return &Query{
- root: nil,
- tail: nil,
- filters: &defaultFilterFunctions,
- }
-}
-
-func (q *Query) appendPath(next pathFn) {
- if q.root == nil {
- q.root = next
- } else {
- q.tail.setNext(next)
- }
- q.tail = next
- next.setNext(newTerminatingFn()) // init the next functor
-}
-
-// CompileQuery compiles a TOML path expression. The returned Query can be used
-// to match elements within a TomlTree and its descendants.
-func CompileQuery(path string) (*Query, error) {
- return parseQuery(lexQuery(path))
-}
-
-// Execute executes a query against a TomlTree, and returns the result of the query.
-func (q *Query) Execute(tree *TomlTree) *QueryResult {
- result := &QueryResult{
- items: []interface{}{},
- positions: []Position{},
- }
- if q.root == nil {
- result.appendResult(tree, tree.GetPosition(""))
- } else {
- ctx := &queryContext{
- result: result,
- filters: q.filters,
- }
- q.root.call(tree, ctx)
- }
- return result
-}
-
-// SetFilter sets a user-defined filter function. These may be used inside
-// "?(..)" query expressions to filter TOML document elements within a query.
-func (q *Query) SetFilter(name string, fn NodeFilterFn) {
- if q.filters == &defaultFilterFunctions {
- // clone the static table
- q.filters = &map[string]NodeFilterFn{}
- for k, v := range defaultFilterFunctions {
- (*q.filters)[k] = v
- }
- }
- (*q.filters)[name] = fn
-}
-
-var defaultFilterFunctions = map[string]NodeFilterFn{
- "tree": func(node interface{}) bool {
- _, ok := node.(*TomlTree)
- return ok
- },
- "int": func(node interface{}) bool {
- _, ok := node.(int64)
- return ok
- },
- "float": func(node interface{}) bool {
- _, ok := node.(float64)
- return ok
- },
- "string": func(node interface{}) bool {
- _, ok := node.(string)
- return ok
- },
- "time": func(node interface{}) bool {
- _, ok := node.(time.Time)
- return ok
- },
- "bool": func(node interface{}) bool {
- _, ok := node.(bool)
- return ok
- },
-}
diff --git a/vendor/github.com/pelletier/go-toml/querylexer.go b/vendor/github.com/pelletier/go-toml/querylexer.go
deleted file mode 100644
index 960681d..0000000
--- a/vendor/github.com/pelletier/go-toml/querylexer.go
+++ /dev/null
@@ -1,356 +0,0 @@
-// TOML JSONPath lexer.
-//
-// Written using the principles developed by Rob Pike in
-// http://www.youtube.com/watch?v=HxaD_trXwRE
-
-package toml
-
-import (
- "fmt"
- "strconv"
- "strings"
- "unicode/utf8"
-)
-
-// Lexer state function
-type queryLexStateFn func() queryLexStateFn
-
-// Lexer definition
-type queryLexer struct {
- input string
- start int
- pos int
- width int
- tokens chan token
- depth int
- line int
- col int
- stringTerm string
-}
-
-func (l *queryLexer) run() {
- for state := l.lexVoid; state != nil; {
- state = state()
- }
- close(l.tokens)
-}
-
-func (l *queryLexer) nextStart() {
- // iterate by runes (utf8 characters)
- // search for newlines and advance line/col counts
- for i := l.start; i < l.pos; {
- r, width := utf8.DecodeRuneInString(l.input[i:])
- if r == '\n' {
- l.line++
- l.col = 1
- } else {
- l.col++
- }
- i += width
- }
- // advance start position to next token
- l.start = l.pos
-}
-
-func (l *queryLexer) emit(t tokenType) {
- l.tokens <- token{
- Position: Position{l.line, l.col},
- typ: t,
- val: l.input[l.start:l.pos],
- }
- l.nextStart()
-}
-
-func (l *queryLexer) emitWithValue(t tokenType, value string) {
- l.tokens <- token{
- Position: Position{l.line, l.col},
- typ: t,
- val: value,
- }
- l.nextStart()
-}
-
-func (l *queryLexer) next() rune {
- if l.pos >= len(l.input) {
- l.width = 0
- return eof
- }
- var r rune
- r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
- l.pos += l.width
- return r
-}
-
-func (l *queryLexer) ignore() {
- l.nextStart()
-}
-
-func (l *queryLexer) backup() {
- l.pos -= l.width
-}
-
-func (l *queryLexer) errorf(format string, args ...interface{}) queryLexStateFn {
- l.tokens <- token{
- Position: Position{l.line, l.col},
- typ: tokenError,
- val: fmt.Sprintf(format, args...),
- }
- return nil
-}
-
-func (l *queryLexer) peek() rune {
- r := l.next()
- l.backup()
- return r
-}
-
-func (l *queryLexer) accept(valid string) bool {
- if strings.ContainsRune(valid, l.next()) {
- return true
- }
- l.backup()
- return false
-}
-
-func (l *queryLexer) follow(next string) bool {
- return strings.HasPrefix(l.input[l.pos:], next)
-}
-
-func (l *queryLexer) lexVoid() queryLexStateFn {
- for {
- next := l.peek()
- switch next {
- case '$':
- l.pos++
- l.emit(tokenDollar)
- continue
- case '.':
- if l.follow("..") {
- l.pos += 2
- l.emit(tokenDotDot)
- } else {
- l.pos++
- l.emit(tokenDot)
- }
- continue
- case '[':
- l.pos++
- l.emit(tokenLeftBracket)
- continue
- case ']':
- l.pos++
- l.emit(tokenRightBracket)
- continue
- case ',':
- l.pos++
- l.emit(tokenComma)
- continue
- case '*':
- l.pos++
- l.emit(tokenStar)
- continue
- case '(':
- l.pos++
- l.emit(tokenLeftParen)
- continue
- case ')':
- l.pos++
- l.emit(tokenRightParen)
- continue
- case '?':
- l.pos++
- l.emit(tokenQuestion)
- continue
- case ':':
- l.pos++
- l.emit(tokenColon)
- continue
- case '\'':
- l.ignore()
- l.stringTerm = string(next)
- return l.lexString
- case '"':
- l.ignore()
- l.stringTerm = string(next)
- return l.lexString
- }
-
- if isSpace(next) {
- l.next()
- l.ignore()
- continue
- }
-
- if isAlphanumeric(next) {
- return l.lexKey
- }
-
- if next == '+' || next == '-' || isDigit(next) {
- return l.lexNumber
- }
-
- if l.next() == eof {
- break
- }
-
- return l.errorf("unexpected char: '%v'", next)
- }
- l.emit(tokenEOF)
- return nil
-}
-
-func (l *queryLexer) lexKey() queryLexStateFn {
- for {
- next := l.peek()
- if !isAlphanumeric(next) {
- l.emit(tokenKey)
- return l.lexVoid
- }
-
- if l.next() == eof {
- break
- }
- }
- l.emit(tokenEOF)
- return nil
-}
-
-func (l *queryLexer) lexString() queryLexStateFn {
- l.pos++
- l.ignore()
- growingString := ""
-
- for {
- if l.follow(l.stringTerm) {
- l.emitWithValue(tokenString, growingString)
- l.pos++
- l.ignore()
- return l.lexVoid
- }
-
- if l.follow("\\\"") {
- l.pos++
- growingString += "\""
- } else if l.follow("\\'") {
- l.pos++
- growingString += "'"
- } else if l.follow("\\n") {
- l.pos++
- growingString += "\n"
- } else if l.follow("\\b") {
- l.pos++
- growingString += "\b"
- } else if l.follow("\\f") {
- l.pos++
- growingString += "\f"
- } else if l.follow("\\/") {
- l.pos++
- growingString += "/"
- } else if l.follow("\\t") {
- l.pos++
- growingString += "\t"
- } else if l.follow("\\r") {
- l.pos++
- growingString += "\r"
- } else if l.follow("\\\\") {
- l.pos++
- growingString += "\\"
- } else if l.follow("\\u") {
- l.pos += 2
- code := ""
- for i := 0; i < 4; i++ {
- c := l.peek()
- l.pos++
- if !isHexDigit(c) {
- return l.errorf("unfinished unicode escape")
- }
- code = code + string(c)
- }
- l.pos--
- intcode, err := strconv.ParseInt(code, 16, 32)
- if err != nil {
- return l.errorf("invalid unicode escape: \\u" + code)
- }
- growingString += string(rune(intcode))
- } else if l.follow("\\U") {
- l.pos += 2
- code := ""
- for i := 0; i < 8; i++ {
- c := l.peek()
- l.pos++
- if !isHexDigit(c) {
- return l.errorf("unfinished unicode escape")
- }
- code = code + string(c)
- }
- l.pos--
- intcode, err := strconv.ParseInt(code, 16, 32)
- if err != nil {
- return l.errorf("invalid unicode escape: \\u" + code)
- }
- growingString += string(rune(intcode))
- } else if l.follow("\\") {
- l.pos++
- return l.errorf("invalid escape sequence: \\" + string(l.peek()))
- } else {
- growingString += string(l.peek())
- }
-
- if l.next() == eof {
- break
- }
- }
-
- return l.errorf("unclosed string")
-}
-
-func (l *queryLexer) lexNumber() queryLexStateFn {
- l.ignore()
- if !l.accept("+") {
- l.accept("-")
- }
- pointSeen := false
- digitSeen := false
- for {
- next := l.next()
- if next == '.' {
- if pointSeen {
- return l.errorf("cannot have two dots in one float")
- }
- if !isDigit(l.peek()) {
- return l.errorf("float cannot end with a dot")
- }
- pointSeen = true
- } else if isDigit(next) {
- digitSeen = true
- } else {
- l.backup()
- break
- }
- if pointSeen && !digitSeen {
- return l.errorf("cannot start float with a dot")
- }
- }
-
- if !digitSeen {
- return l.errorf("no digit in that number")
- }
- if pointSeen {
- l.emit(tokenFloat)
- } else {
- l.emit(tokenInteger)
- }
- return l.lexVoid
-}
-
-// Entry point
-func lexQuery(input string) chan token {
- l := &queryLexer{
- input: input,
- tokens: make(chan token),
- line: 1,
- col: 1,
- }
- go l.run()
- return l.tokens
-}
diff --git a/vendor/github.com/pelletier/go-toml/queryparser.go b/vendor/github.com/pelletier/go-toml/queryparser.go
deleted file mode 100644
index 1cbfc83..0000000
--- a/vendor/github.com/pelletier/go-toml/queryparser.go
+++ /dev/null
@@ -1,275 +0,0 @@
-/*
- Based on the "jsonpath" spec/concept.
-
- http://goessner.net/articles/JsonPath/
- https://code.google.com/p/json-path/
-*/
-
-package toml
-
-import (
- "fmt"
-)
-
-const maxInt = int(^uint(0) >> 1)
-
-type queryParser struct {
- flow chan token
- tokensBuffer []token
- query *Query
- union []pathFn
- err error
-}
-
-type queryParserStateFn func() queryParserStateFn
-
-// Formats and panics an error message based on a token
-func (p *queryParser) parseError(tok *token, msg string, args ...interface{}) queryParserStateFn {
- p.err = fmt.Errorf(tok.Position.String()+": "+msg, args...)
- return nil // trigger parse to end
-}
-
-func (p *queryParser) run() {
- for state := p.parseStart; state != nil; {
- state = state()
- }
-}
-
-func (p *queryParser) backup(tok *token) {
- p.tokensBuffer = append(p.tokensBuffer, *tok)
-}
-
-func (p *queryParser) peek() *token {
- if len(p.tokensBuffer) != 0 {
- return &(p.tokensBuffer[0])
- }
-
- tok, ok := <-p.flow
- if !ok {
- return nil
- }
- p.backup(&tok)
- return &tok
-}
-
-func (p *queryParser) lookahead(types ...tokenType) bool {
- result := true
- buffer := []token{}
-
- for _, typ := range types {
- tok := p.getToken()
- if tok == nil {
- result = false
- break
- }
- buffer = append(buffer, *tok)
- if tok.typ != typ {
- result = false
- break
- }
- }
- // add the tokens back to the buffer, and return
- p.tokensBuffer = append(p.tokensBuffer, buffer...)
- return result
-}
-
-func (p *queryParser) getToken() *token {
- if len(p.tokensBuffer) != 0 {
- tok := p.tokensBuffer[0]
- p.tokensBuffer = p.tokensBuffer[1:]
- return &tok
- }
- tok, ok := <-p.flow
- if !ok {
- return nil
- }
- return &tok
-}
-
-func (p *queryParser) parseStart() queryParserStateFn {
- tok := p.getToken()
-
- if tok == nil || tok.typ == tokenEOF {
- return nil
- }
-
- if tok.typ != tokenDollar {
- return p.parseError(tok, "Expected '$' at start of expression")
- }
-
- return p.parseMatchExpr
-}
-
-// handle '.' prefix, '[]', and '..'
-func (p *queryParser) parseMatchExpr() queryParserStateFn {
- tok := p.getToken()
- switch tok.typ {
- case tokenDotDot:
- p.query.appendPath(&matchRecursiveFn{})
- // nested parse for '..'
- tok := p.getToken()
- switch tok.typ {
- case tokenKey:
- p.query.appendPath(newMatchKeyFn(tok.val))
- return p.parseMatchExpr
- case tokenLeftBracket:
- return p.parseBracketExpr
- case tokenStar:
- // do nothing - the recursive predicate is enough
- return p.parseMatchExpr
- }
-
- case tokenDot:
- // nested parse for '.'
- tok := p.getToken()
- switch tok.typ {
- case tokenKey:
- p.query.appendPath(newMatchKeyFn(tok.val))
- return p.parseMatchExpr
- case tokenStar:
- p.query.appendPath(&matchAnyFn{})
- return p.parseMatchExpr
- }
-
- case tokenLeftBracket:
- return p.parseBracketExpr
-
- case tokenEOF:
- return nil // allow EOF at this stage
- }
- return p.parseError(tok, "expected match expression")
-}
-
-func (p *queryParser) parseBracketExpr() queryParserStateFn {
- if p.lookahead(tokenInteger, tokenColon) {
- return p.parseSliceExpr
- }
- if p.peek().typ == tokenColon {
- return p.parseSliceExpr
- }
- return p.parseUnionExpr
-}
-
-func (p *queryParser) parseUnionExpr() queryParserStateFn {
- var tok *token
-
- // this state can be traversed after some sub-expressions
- // so be careful when setting up state in the parser
- if p.union == nil {
- p.union = []pathFn{}
- }
-
-loop: // labeled loop for easy breaking
- for {
- if len(p.union) > 0 {
- // parse delimiter or terminator
- tok = p.getToken()
- switch tok.typ {
- case tokenComma:
- // do nothing
- case tokenRightBracket:
- break loop
- default:
- return p.parseError(tok, "expected ',' or ']', not '%s'", tok.val)
- }
- }
-
- // parse sub expression
- tok = p.getToken()
- switch tok.typ {
- case tokenInteger:
- p.union = append(p.union, newMatchIndexFn(tok.Int()))
- case tokenKey:
- p.union = append(p.union, newMatchKeyFn(tok.val))
- case tokenString:
- p.union = append(p.union, newMatchKeyFn(tok.val))
- case tokenQuestion:
- return p.parseFilterExpr
- default:
- return p.parseError(tok, "expected union sub expression, not '%s', %d", tok.val, len(p.union))
- }
- }
-
- // if there is only one sub-expression, use that instead
- if len(p.union) == 1 {
- p.query.appendPath(p.union[0])
- } else {
- p.query.appendPath(&matchUnionFn{p.union})
- }
-
- p.union = nil // clear out state
- return p.parseMatchExpr
-}
-
-func (p *queryParser) parseSliceExpr() queryParserStateFn {
- // init slice to grab all elements
- start, end, step := 0, maxInt, 1
-
- // parse optional start
- tok := p.getToken()
- if tok.typ == tokenInteger {
- start = tok.Int()
- tok = p.getToken()
- }
- if tok.typ != tokenColon {
- return p.parseError(tok, "expected ':'")
- }
-
- // parse optional end
- tok = p.getToken()
- if tok.typ == tokenInteger {
- end = tok.Int()
- tok = p.getToken()
- }
- if tok.typ == tokenRightBracket {
- p.query.appendPath(newMatchSliceFn(start, end, step))
- return p.parseMatchExpr
- }
- if tok.typ != tokenColon {
- return p.parseError(tok, "expected ']' or ':'")
- }
-
- // parse optional step
- tok = p.getToken()
- if tok.typ == tokenInteger {
- step = tok.Int()
- if step < 0 {
- return p.parseError(tok, "step must be a positive value")
- }
- tok = p.getToken()
- }
- if tok.typ != tokenRightBracket {
- return p.parseError(tok, "expected ']'")
- }
-
- p.query.appendPath(newMatchSliceFn(start, end, step))
- return p.parseMatchExpr
-}
-
-func (p *queryParser) parseFilterExpr() queryParserStateFn {
- tok := p.getToken()
- if tok.typ != tokenLeftParen {
- return p.parseError(tok, "expected left-parenthesis for filter expression")
- }
- tok = p.getToken()
- if tok.typ != tokenKey && tok.typ != tokenString {
- return p.parseError(tok, "expected key or string for filter funciton name")
- }
- name := tok.val
- tok = p.getToken()
- if tok.typ != tokenRightParen {
- return p.parseError(tok, "expected right-parenthesis for filter expression")
- }
- p.union = append(p.union, newMatchFilterFn(name, tok.Position))
- return p.parseUnionExpr
-}
-
-func parseQuery(flow chan token) (*Query, error) {
- parser := &queryParser{
- flow: flow,
- tokensBuffer: []token{},
- query: newQuery(),
- }
- parser.run()
- return parser.query, parser.err
-}
diff --git a/vendor/github.com/pelletier/go-toml/test.sh b/vendor/github.com/pelletier/go-toml/test.sh
index 436d2fb..91a8896 100755
--- a/vendor/github.com/pelletier/go-toml/test.sh
+++ b/vendor/github.com/pelletier/go-toml/test.sh
@@ -19,11 +19,16 @@ function git_clone() {
popd
}
+# Remove potential previous runs
+rm -rf src test_program_bin toml-test
+
# Run go vet
go vet ./...
go get github.com/pelletier/go-buffruneio
go get github.com/davecgh/go-spew/spew
+go get gopkg.in/yaml.v2
+go get github.com/BurntSushi/toml
# get code for BurntSushi TOML validation
# pinning all to 'HEAD' for version 0.3.x work (TODO: pin to commit hash when tests stabilize)
@@ -36,13 +41,16 @@ go build -o toml-test github.com/BurntSushi/toml-test
# vendorize the current lib for testing
# NOTE: this basically mocks an install without having to go back out to github for code
mkdir -p src/github.com/pelletier/go-toml/cmd
+mkdir -p src/github.com/pelletier/go-toml/query
cp *.go *.toml src/github.com/pelletier/go-toml
cp -R cmd/* src/github.com/pelletier/go-toml/cmd
+cp -R query/* src/github.com/pelletier/go-toml/query
go build -o test_program_bin src/github.com/pelletier/go-toml/cmd/test_program.go
# Run basic unit tests
-go test github.com/pelletier/go-toml -v -covermode=count -coverprofile=coverage.out
+go test github.com/pelletier/go-toml -covermode=count -coverprofile=coverage.out
go test github.com/pelletier/go-toml/cmd/tomljson
+go test github.com/pelletier/go-toml/query
# run the entire BurntSushi test suite
if [[ $# -eq 0 ]] ; then
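
Note: query.go, querylexer.go and queryparser.go are deleted from the package root, while test.sh now vendors and tests a query/ subdirectory, so the JSONPath-style query support appears to have moved into a go-toml/query subpackage rather than being dropped. Assuming that subpackage exposes a Compile/Execute pair analogous to the removed CompileQuery and Query.Execute (an assumption, not confirmed by this diff), usage would look roughly like:

    // import "github.com/pelletier/go-toml/query"  // assumed import path
    q, err := query.Compile("$.servers[0].name")    // assumed replacement for CompileQuery
    if err != nil {
        // handle invalid path expression
    }
    results := q.Execute(tree) // tree is a *toml.Tree
    _ = results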
diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go
index 1ba56a1..c3e3243 100644
--- a/vendor/github.com/pelletier/go-toml/toml.go
+++ b/vendor/github.com/pelletier/go-toml/toml.go
@@ -4,40 +4,50 @@ import (
"errors"
"fmt"
"io"
+ "io/ioutil"
"os"
"runtime"
"strings"
)
type tomlValue struct {
- value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list
- position Position
+ value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list
+ comment string
+ commented bool
+ position Position
}
-// TomlTree is the result of the parsing of a TOML file.
-type TomlTree struct {
- values map[string]interface{} // string -> *tomlValue, *TomlTree, []*TomlTree
- position Position
+// Tree is the result of the parsing of a TOML file.
+type Tree struct {
+ values map[string]interface{} // string -> *tomlValue, *Tree, []*Tree
+ comment string
+ commented bool
+ position Position
}
-func newTomlTree() *TomlTree {
- return &TomlTree{
+func newTree() *Tree {
+ return &Tree{
values: make(map[string]interface{}),
position: Position{},
}
}
-// TreeFromMap initializes a new TomlTree object using the given map.
-func TreeFromMap(m map[string]interface{}) (*TomlTree, error) {
+// TreeFromMap initializes a new Tree object using the given map.
+func TreeFromMap(m map[string]interface{}) (*Tree, error) {
result, err := toTree(m)
if err != nil {
return nil, err
}
- return result.(*TomlTree), nil
+ return result.(*Tree), nil
+}
+
+// Position returns the position of the tree.
+func (t *Tree) Position() Position {
+ return t.position
}
// Has returns a boolean indicating if the given key exists.
-func (t *TomlTree) Has(key string) bool {
+func (t *Tree) Has(key string) bool {
if key == "" {
return false
}
@@ -45,25 +55,26 @@ func (t *TomlTree) Has(key string) bool {
}
// HasPath returns true if the given path of keys exists, false otherwise.
-func (t *TomlTree) HasPath(keys []string) bool {
+func (t *Tree) HasPath(keys []string) bool {
return t.GetPath(keys) != nil
}
-// Keys returns the keys of the toplevel tree.
-// Warning: this is a costly operation.
-func (t *TomlTree) Keys() []string {
- var keys []string
+// Keys returns the keys of the toplevel tree (does not recurse).
+func (t *Tree) Keys() []string {
+ keys := make([]string, len(t.values))
+ i := 0
for k := range t.values {
- keys = append(keys, k)
+ keys[i] = k
+ i++
}
return keys
}
-// Get the value at key in the TomlTree.
+// Get the value at key in the Tree.
// Key is a dot-separated path (e.g. a.b.c).
// Returns nil if the path does not exist in the tree.
// If keys is of length zero, the current tree is returned.
-func (t *TomlTree) Get(key string) interface{} {
+func (t *Tree) Get(key string) interface{} {
if key == "" {
return t
}
@@ -76,7 +87,7 @@ func (t *TomlTree) Get(key string) interface{} {
// GetPath returns the element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree is returned.
-func (t *TomlTree) GetPath(keys []string) interface{} {
+func (t *Tree) GetPath(keys []string) interface{} {
if len(keys) == 0 {
return t
}
@@ -87,9 +98,9 @@ func (t *TomlTree) GetPath(keys []string) interface{} {
return nil
}
switch node := value.(type) {
- case *TomlTree:
+ case *Tree:
subtree = node
- case []*TomlTree:
+ case []*Tree:
// go to most recent element
if len(node) == 0 {
return nil
@@ -109,7 +120,7 @@ func (t *TomlTree) GetPath(keys []string) interface{} {
}
// GetPosition returns the position of the given key.
-func (t *TomlTree) GetPosition(key string) Position {
+func (t *Tree) GetPosition(key string) Position {
if key == "" {
return t.position
}
@@ -118,7 +129,7 @@ func (t *TomlTree) GetPosition(key string) Position {
// GetPositionPath returns the element in the tree indicated by 'keys'.
// If keys is of length zero, the current tree is returned.
-func (t *TomlTree) GetPositionPath(keys []string) Position {
+func (t *Tree) GetPositionPath(keys []string) Position {
if len(keys) == 0 {
return t.position
}
@@ -129,9 +140,9 @@ func (t *TomlTree) GetPositionPath(keys []string) Position {
return Position{0, 0}
}
switch node := value.(type) {
- case *TomlTree:
+ case *Tree:
subtree = node
- case []*TomlTree:
+ case []*Tree:
// go to most recent element
if len(node) == 0 {
return Position{0, 0}
@@ -145,9 +156,9 @@ func (t *TomlTree) GetPositionPath(keys []string) Position {
switch node := subtree.values[keys[len(keys)-1]].(type) {
case *tomlValue:
return node.position
- case *TomlTree:
+ case *Tree:
return node.position
- case []*TomlTree:
+ case []*Tree:
// go to most recent element
if len(node) == 0 {
return Position{0, 0}
@@ -159,7 +170,7 @@ func (t *TomlTree) GetPositionPath(keys []string) Position {
}
// GetDefault works like Get but with a default value
-func (t *TomlTree) GetDefault(key string, def interface{}) interface{} {
+func (t *Tree) GetDefault(key string, def interface{}) interface{} {
val := t.Get(key)
if val == nil {
return def
@@ -169,30 +180,30 @@ func (t *TomlTree) GetDefault(key string, def interface{}) interface{} {
// Set an element in the tree.
// Key is a dot-separated path (e.g. a.b.c).
-// Creates all necessary intermediates trees, if needed.
-func (t *TomlTree) Set(key string, value interface{}) {
- t.SetPath(strings.Split(key, "."), value)
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) Set(key string, comment string, commented bool, value interface{}) {
+ t.SetPath(strings.Split(key, "."), comment, commented, value)
}
// SetPath sets an element in the tree.
// Keys is an array of path elements (e.g. {"a","b","c"}).
-// Creates all necessary intermediates trees, if needed.
-func (t *TomlTree) SetPath(keys []string, value interface{}) {
+// Creates all necessary intermediate trees, if needed.
+func (t *Tree) SetPath(keys []string, comment string, commented bool, value interface{}) {
subtree := t
for _, intermediateKey := range keys[:len(keys)-1] {
nextTree, exists := subtree.values[intermediateKey]
if !exists {
- nextTree = newTomlTree()
+ nextTree = newTree()
subtree.values[intermediateKey] = nextTree // add new element here
}
switch node := nextTree.(type) {
- case *TomlTree:
+ case *Tree:
subtree = node
- case []*TomlTree:
+ case []*Tree:
// go to most recent element
if len(node) == 0 {
// create element if it does not exist
- subtree.values[intermediateKey] = append(node, newTomlTree())
+ subtree.values[intermediateKey] = append(node, newTree())
}
subtree = node[len(node)-1]
}
@@ -201,14 +212,18 @@ func (t *TomlTree) SetPath(keys []string, value interface{}) {
var toInsert interface{}
switch value.(type) {
- case *TomlTree:
+ case *Tree:
+ tt := value.(*Tree)
+ tt.comment = comment
toInsert = value
- case []*TomlTree:
+ case []*Tree:
toInsert = value
case *tomlValue:
- toInsert = value
+ tt := value.(*tomlValue)
+ tt.comment = comment
+ toInsert = tt
default:
- toInsert = &tomlValue{value: value}
+ toInsert = &tomlValue{value: value, comment: comment, commented: commented}
}
subtree.values[keys[len(keys)-1]] = toInsert
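
Note: Set and SetPath now take a comment string and a commented flag in addition to the value; existing callers pass "" and false, as the parser changes above do. A short usage sketch of the new signatures (key names are hypothetical):

    // given a *Tree t (e.g. obtained from toml.Load)
    t.Set("server.port", "listen port", false, int64(8080))
    t.SetPath([]string{"server", "host"}, "", false, "localhost")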
@@ -221,21 +236,21 @@ func (t *TomlTree) SetPath(keys []string, value interface{}) {
// and tree[a][b][c]
//
// Returns nil on success, error object on failure
-func (t *TomlTree) createSubTree(keys []string, pos Position) error {
+func (t *Tree) createSubTree(keys []string, pos Position) error {
subtree := t
for _, intermediateKey := range keys {
nextTree, exists := subtree.values[intermediateKey]
if !exists {
- tree := newTomlTree()
+ tree := newTree()
tree.position = pos
subtree.values[intermediateKey] = tree
nextTree = tree
}
switch node := nextTree.(type) {
- case []*TomlTree:
+ case []*Tree:
subtree = node[len(node)-1]
- case *TomlTree:
+ case *Tree:
subtree = node
default:
return fmt.Errorf("unknown type for path %s (%s): %T (%#v)",
@@ -245,17 +260,8 @@ func (t *TomlTree) createSubTree(keys []string, pos Position) error {
return nil
}
-// Query compiles and executes a query on a tree and returns the query result.
-func (t *TomlTree) Query(query string) (*QueryResult, error) {
- q, err := CompileQuery(query)
- if err != nil {
- return nil, err
- }
- return q.Execute(t), nil
-}
-
-// LoadReader creates a TomlTree from any io.Reader.
-func LoadReader(reader io.Reader) (tree *TomlTree, err error) {
+// LoadBytes creates a Tree from a []byte.
+func LoadBytes(b []byte) (tree *Tree, err error) {
defer func() {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
@@ -264,17 +270,27 @@ func LoadReader(reader io.Reader) (tree *TomlTree, err error) {
err = errors.New(r.(string))
}
}()
- tree = parseToml(lexToml(reader))
+ tree = parseToml(lexToml(b))
+ return
+}
+
+// LoadReader creates a Tree from any io.Reader.
+func LoadReader(reader io.Reader) (tree *Tree, err error) {
+ inputBytes, err := ioutil.ReadAll(reader)
+ if err != nil {
+ return
+ }
+ tree, err = LoadBytes(inputBytes)
return
}
-// Load creates a TomlTree from a string.
-func Load(content string) (tree *TomlTree, err error) {
- return LoadReader(strings.NewReader(content))
+// Load creates a Tree from a string.
+func Load(content string) (tree *Tree, err error) {
+ return LoadBytes([]byte(content))
}
-// LoadFile creates a TomlTree from a file.
-func LoadFile(path string) (tree *TomlTree, err error) {
+// LoadFile creates a Tree from a file.
+func LoadFile(path string) (tree *Tree, err error) {
file, err := os.Open(path)
if err != nil {
return nil, err
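
Note: LoadBytes is new; LoadReader now slurps the input with ioutil.ReadAll and delegates to it, and Load wraps LoadBytes directly. A small sketch of loading a document and reading a value with the renamed Tree type (key name is hypothetical):

    // import "github.com/pelletier/go-toml"
    tree, err := toml.Load("port = 8080")
    if err != nil {
        // handle the parse error
    }
    port := tree.Get("port").(int64) // Get returns interface{}; TOML integers are int64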
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go
index c6054f3..79610e9 100644
--- a/vendor/github.com/pelletier/go-toml/tomltree_create.go
+++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go
@@ -6,10 +6,7 @@ import (
"time"
)
-// supported values:
-// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32
-
-var kindToTypeMapping = map[reflect.Kind]reflect.Type{
+var kindToType = [reflect.String + 1]reflect.Type{
reflect.Bool: reflect.TypeOf(true),
reflect.String: reflect.TypeOf(""),
reflect.Float32: reflect.TypeOf(float64(1)),
@@ -26,6 +23,16 @@ var kindToTypeMapping = map[reflect.Kind]reflect.Type{
reflect.Uint64: reflect.TypeOf(uint64(1)),
}
+// typeFor returns a reflect.Type for a reflect.Kind, or nil if none is found.
+// supported values:
+// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32
+func typeFor(k reflect.Kind) reflect.Type {
+ if k > 0 && int(k) < len(kindToType) {
+ return kindToType[k]
+ }
+ return nil
+}
+
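
Note: the kind-to-type lookup moves from a map to a fixed-size array indexed by reflect.Kind, with typeFor doing the bounds check. This works because reflect.Kind is a small consecutive integer enum (reflect.Bool is 1, reflect.String is 24); a minimal sketch of the same pattern outside the library:

    // An array indexed directly by kind; unsupported kinds stay nil.
    var lookup = [reflect.String + 1]reflect.Type{
        reflect.Bool:  reflect.TypeOf(true),
        reflect.Int32: reflect.TypeOf(int64(1)), // smaller ints widen to int64
    }

    func typeOfKind(k reflect.Kind) reflect.Type {
        if k > 0 && int(k) < len(lookup) {
            return lookup[k]
        }
        return nil
    }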
func simpleValueCoercion(object interface{}) (interface{}, error) {
switch original := object.(type) {
case string, bool, int64, uint64, float64, time.Time:
@@ -51,7 +58,7 @@ func simpleValueCoercion(object interface{}) (interface{}, error) {
case fmt.Stringer:
return original.String(), nil
default:
- return nil, fmt.Errorf("cannot convert type %T to TomlTree", object)
+ return nil, fmt.Errorf("cannot convert type %T to Tree", object)
}
}
@@ -59,7 +66,7 @@ func sliceToTree(object interface{}) (interface{}, error) {
// arrays are a bit tricky, since they can represent either a
// collection of simple values, which is represented by one
// *tomlValue, or an array of tables, which is represented by an
- // array of *TomlTree.
+ // array of *Tree.
// holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice
value := reflect.ValueOf(object)
@@ -70,19 +77,19 @@ func sliceToTree(object interface{}) (interface{}, error) {
}
if insideType.Kind() == reflect.Map {
// this is considered as an array of tables
- tablesArray := make([]*TomlTree, 0, length)
+ tablesArray := make([]*Tree, 0, length)
for i := 0; i < length; i++ {
table := value.Index(i)
tree, err := toTree(table.Interface())
if err != nil {
return nil, err
}
- tablesArray = append(tablesArray, tree.(*TomlTree))
+ tablesArray = append(tablesArray, tree.(*Tree))
}
return tablesArray, nil
}
- sliceType := kindToTypeMapping[insideType.Kind()]
+ sliceType := typeFor(insideType.Kind())
if sliceType == nil {
sliceType = insideType
}
@@ -97,7 +104,7 @@ func sliceToTree(object interface{}) (interface{}, error) {
}
arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
}
- return &tomlValue{arrayValue.Interface(), Position{}}, nil
+ return &tomlValue{value: arrayValue.Interface(), position: Position{}}, nil
}
func toTree(object interface{}) (interface{}, error) {
@@ -120,7 +127,7 @@ func toTree(object interface{}) (interface{}, error) {
}
values[key.String()] = newValue
}
- return &TomlTree{values, Position{}}, nil
+ return &Tree{values: values, position: Position{}}, nil
}
if value.Kind() == reflect.Array || value.Kind() == reflect.Slice {
@@ -131,5 +138,5 @@ func toTree(object interface{}) (interface{}, error) {
if err != nil {
return nil, err
}
- return &tomlValue{simpleValue, Position{}}, nil
+ return &tomlValue{value: simpleValue, position: Position{}}, nil
}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go
index 6a7fa17..449f35a 100644
--- a/vendor/github.com/pelletier/go-toml/tomltree_write.go
+++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go
@@ -4,42 +4,44 @@ import (
"bytes"
"fmt"
"io"
+ "math"
+ "reflect"
"sort"
"strconv"
"strings"
"time"
- "reflect"
)
// encodes a string to a TOML-compliant string value
func encodeTomlString(value string) string {
- result := ""
+ var b bytes.Buffer
+
for _, rr := range value {
switch rr {
case '\b':
- result += "\\b"
+ b.WriteString(`\b`)
case '\t':
- result += "\\t"
+ b.WriteString(`\t`)
case '\n':
- result += "\\n"
+ b.WriteString(`\n`)
case '\f':
- result += "\\f"
+ b.WriteString(`\f`)
case '\r':
- result += "\\r"
+ b.WriteString(`\r`)
case '"':
- result += "\\\""
+ b.WriteString(`\"`)
case '\\':
- result += "\\\\"
+ b.WriteString(`\\`)
default:
intRr := uint16(rr)
if intRr < 0x001F {
- result += fmt.Sprintf("\\u%0.4X", intRr)
+ b.WriteString(fmt.Sprintf("\\u%0.4X", intRr))
} else {
- result += string(rr)
+ b.WriteRune(rr)
}
}
}
- return result
+ return b.String()
}
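
Note: encodeTomlString now accumulates into a bytes.Buffer instead of concatenating strings, avoiding a fresh string allocation per escaped rune. The same pattern in isolation (a sketch, not the library's code):

    var b bytes.Buffer
    for _, r := range "tab:\tquote:\"" {
        switch r {
        case '\t':
            b.WriteString(`\t`)
        case '"':
            b.WriteString(`\"`)
        default:
            b.WriteRune(r)
        }
    }
    // b.String() == `tab:\tquote:\"`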
func tomlValueStringRepresentation(v interface{}) (string, error) {
@@ -49,6 +51,11 @@ func tomlValueStringRepresentation(v interface{}) (string, error) {
case int64:
return strconv.FormatInt(value, 10), nil
case float64:
+ // Ensure a round float contains a decimal point. Otherwise feeding
+ // the output back to the parser would convert it to an integer.
+ if math.Trunc(value) == value {
+ return strconv.FormatFloat(value, 'f', 1, 32), nil
+ }
return strconv.FormatFloat(value, 'f', -1, 32), nil
case string:
return "\"" + encodeTomlString(value) + "\"", nil
@@ -83,14 +90,14 @@ func tomlValueStringRepresentation(v interface{}) (string, error) {
return "", fmt.Errorf("unsupported value type %T: %v", v, v)
}
-func (t *TomlTree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (int64, error) {
+func (t *Tree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (int64, error) {
simpleValuesKeys := make([]string, 0)
complexValuesKeys := make([]string, 0)
for k := range t.values {
v := t.values[k]
switch v.(type) {
- case *TomlTree, []*TomlTree:
+ case *Tree, []*Tree:
complexValuesKeys = append(complexValuesKeys, k)
default:
simpleValuesKeys = append(simpleValuesKeys, k)
@@ -111,8 +118,24 @@ func (t *TomlTree) writeTo(w io.Writer, indent, keyspace string, bytesCount int6
return bytesCount, err
}
- kvRepr := fmt.Sprintf("%s%s = %s\n", indent, k, repr)
- writtenBytesCount, err := w.Write([]byte(kvRepr))
+ if v.comment != "" {
+ comment := strings.Replace(v.comment, "\n", "\n"+indent+"#", -1)
+ start := "# "
+ if strings.HasPrefix(comment, "#") {
+ start = ""
+ }
+ writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment, "\n")
+ bytesCount += int64(writtenBytesCountComment)
+ if errc != nil {
+ return bytesCount, errc
+ }
+ }
+
+ var commented string
+ if v.commented {
+ commented = "# "
+ }
+ writtenBytesCount, err := writeStrings(w, indent, commented, k, " = ", repr, "\n")
bytesCount += int64(writtenBytesCount)
if err != nil {
return bytesCount, err
@@ -126,12 +149,31 @@ func (t *TomlTree) writeTo(w io.Writer, indent, keyspace string, bytesCount int6
if keyspace != "" {
combinedKey = keyspace + "." + combinedKey
}
+ var commented string
+ if t.commented {
+ commented = "# "
+ }
switch node := v.(type) {
// node has to be of those two types given how keys are sorted above
- case *TomlTree:
- tableName := fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
- writtenBytesCount, err := w.Write([]byte(tableName))
+ case *Tree:
+ tv, ok := t.values[k].(*Tree)
+ if !ok {
+ return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+ }
+ if tv.comment != "" {
+ comment := strings.Replace(tv.comment, "\n", "\n"+indent+"#", -1)
+ start := "# "
+ if strings.HasPrefix(comment, "#") {
+ start = ""
+ }
+ writtenBytesCountComment, errc := writeStrings(w, "\n", indent, start, comment)
+ bytesCount += int64(writtenBytesCountComment)
+ if errc != nil {
+ return bytesCount, errc
+ }
+ }
+ writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[", combinedKey, "]\n")
bytesCount += int64(writtenBytesCount)
if err != nil {
return bytesCount, err
@@ -140,20 +182,17 @@ func (t *TomlTree) writeTo(w io.Writer, indent, keyspace string, bytesCount int6
if err != nil {
return bytesCount, err
}
- case []*TomlTree:
+ case []*Tree:
for _, subTree := range node {
- if len(subTree.values) > 0 {
- tableArrayName := fmt.Sprintf("\n%s[[%s]]\n", indent, combinedKey)
- writtenBytesCount, err := w.Write([]byte(tableArrayName))
- bytesCount += int64(writtenBytesCount)
- if err != nil {
- return bytesCount, err
- }
-
- bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount)
- if err != nil {
- return bytesCount, err
- }
+ writtenBytesCount, err := writeStrings(w, "\n", indent, commented, "[[", combinedKey, "]]\n")
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+
+ bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount)
+ if err != nil {
+ return bytesCount, err
}
}
}
@@ -162,16 +201,28 @@ func (t *TomlTree) writeTo(w io.Writer, indent, keyspace string, bytesCount int6
return bytesCount, nil
}
-// WriteTo encode the TomlTree as Toml and writes it to the writer w.
+func writeStrings(w io.Writer, s ...string) (int, error) {
+ var n int
+ for i := range s {
+ b, err := io.WriteString(w, s[i])
+ n += b
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+}
+
+// WriteTo encodes the Tree as Toml and writes it to the writer w.
// Returns the number of bytes written in case of success, or an error if anything happened.
-func (t *TomlTree) WriteTo(w io.Writer) (int64, error) {
+func (t *Tree) WriteTo(w io.Writer) (int64, error) {
return t.writeTo(w, "", "", 0)
}
// ToTomlString generates a human-readable representation of the current tree.
// Output spans multiple lines, and is suitable for ingest by a TOML parser.
// If the conversion cannot be performed, ToString returns a non-nil error.
-func (t *TomlTree) ToTomlString() (string, error) {
+func (t *Tree) ToTomlString() (string, error) {
var buf bytes.Buffer
_, err := t.WriteTo(&buf)
if err != nil {
@@ -182,32 +233,34 @@ func (t *TomlTree) ToTomlString() (string, error) {
// String generates a human-readable representation of the current tree.
// Alias of ToString. Present to implement the fmt.Stringer interface.
-func (t *TomlTree) String() string {
+func (t *Tree) String() string {
result, _ := t.ToTomlString()
return result
}
// ToMap recursively generates a representation of the tree using Go built-in structures.
// The following types are used:
-// * uint64
-// * int64
-// * bool
-// * string
-// * time.Time
-// * map[string]interface{} (where interface{} is any of this list)
-// * []interface{} (where interface{} is any of this list)
-func (t *TomlTree) ToMap() map[string]interface{} {
+//
+// * bool
+// * float64
+// * int64
+// * string
+// * uint64
+// * time.Time
+// * map[string]interface{} (where interface{} is any of this list)
+// * []interface{} (where interface{} is any of this list)
+func (t *Tree) ToMap() map[string]interface{} {
result := map[string]interface{}{}
for k, v := range t.values {
switch node := v.(type) {
- case []*TomlTree:
+ case []*Tree:
var array []interface{}
for _, item := range node {
array = append(array, item.ToMap())
}
result[k] = array
- case *TomlTree:
+ case *Tree:
result[k] = node.ToMap()
case *tomlValue:
result[k] = node.value
diff --git a/vendor/github.com/pkg/browser/browser_openbsd.go b/vendor/github.com/pkg/browser/browser_openbsd.go
index 3c64118..4fc7ff0 100644
--- a/vendor/github.com/pkg/browser/browser_openbsd.go
+++ b/vendor/github.com/pkg/browser/browser_openbsd.go
@@ -5,12 +5,10 @@ import (
"os/exec"
)
-var errNoXdgOpen = errors.New("xdg-open: command not found - install xdg-utils from ports(8)")
-
func openBrowser(url string) error {
err := runCmd("xdg-open", url)
if e, ok := err.(*exec.Error); ok && e.Err == exec.ErrNotFound {
- return errNoXdgOpen
+ return errors.New("xdg-open: command not found - install xdg-utils from ports(8)")
}
return err
}
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
index 6b1f289..cbe3f3e 100644
--- a/vendor/github.com/pkg/errors/stack.go
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -79,6 +79,14 @@ func (f Frame) Format(s fmt.State, verb rune) {
// StackTrace is stack of Frames from innermost (newest) to outermost (oldest).
type StackTrace []Frame
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+// %s lists source files for each Frame in the stack
+// %v lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+v Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
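
Note: this hunk only documents the existing Formatter behaviour of StackTrace; the formatting code itself is unchanged. A typical use with pkg/errors, for context:

    err := errors.New("boom") // pkg/errors records a stack at creation
    fmt.Printf("%v\n", err)   // message only
    fmt.Printf("%+v\n", err)  // message plus file, function and line for each frame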
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
index 72d5256..273db5f 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/counter.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go
@@ -70,16 +70,12 @@ func (c *counter) Add(v float64) {
// if you want to count the same thing partitioned by various dimensions
// (e.g. number of HTTP requests, partitioned by response code and
// method). Create instances with NewCounterVec.
-//
-// CounterVec embeds MetricVec. See there for a full list of methods with
-// detailed documentation.
type CounterVec struct {
- *MetricVec
+ *metricVec
}
// NewCounterVec creates a new CounterVec based on the provided CounterOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -88,7 +84,7 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
opts.ConstLabels,
)
return &CounterVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
result := &counter{value: value{
desc: desc,
valType: CounterValue,
@@ -100,22 +96,51 @@ func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec {
}
}
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Counter and not a
-// Metric so that no type conversion is required.
+// GetMetricWithLabelValues returns the Counter for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Counter is created.
+//
+// It is possible to call this method without using the returned Counter to only
+// create the new Counter but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Counter for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Counter from the CounterVec. In that case,
+// the Counter will still exist, but it will not be exported anymore, even if a
+// Counter with the same label values is created later.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ metric, err := m.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Counter), err
}
return nil, err
}
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Counter and not a Metric so that no
-// type conversion is required.
+// GetMetricWith returns the Counter for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Counter is created. Implications of
+// creating a Counter without using it and keeping the Counter for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
+ metric, err := m.metricVec.getMetricWith(labels)
if metric != nil {
return metric.(Counter), err
}
@@ -127,14 +152,14 @@ func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) {
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
func (m *CounterVec) WithLabelValues(lvs ...string) Counter {
- return m.MetricVec.WithLabelValues(lvs...).(Counter)
+ return m.metricVec.withLabelValues(lvs...).(Counter)
}
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
func (m *CounterVec) With(labels Labels) Counter {
- return m.MetricVec.With(labels).(Counter)
+ return m.metricVec.with(labels).(Counter)
}
// CounterFunc is a Counter whose value is determined at collect time by calling a
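
Note: MetricVec becomes the unexported metricVec, and the *Vec types now carry the full GetMetricWith* documentation themselves. The exported call sites are unchanged; a typical CounterVec use, matching the doc comments above (metric name and labels are illustrative):

    httpReqs := prometheus.NewCounterVec(
        prometheus.CounterOpts{
            Name: "http_requests_total",
            Help: "Total HTTP requests, partitioned by status code and method.",
        },
        []string{"code", "method"},
    )
    prometheus.MustRegister(httpReqs)

    httpReqs.WithLabelValues("404", "GET").Add(42)
    httpReqs.With(prometheus.Labels{"code": "200", "method": "POST"}).Inc()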
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
index 1835b16..920abc9 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/desc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go
@@ -25,19 +25,6 @@ import (
dto "github.com/prometheus/client_model/go"
)
-// reservedLabelPrefix is a prefix which is not legal in user-supplied
-// label names.
-const reservedLabelPrefix = "__"
-
-// Labels represents a collection of label name -> value mappings. This type is
-// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
-// metric vector Collectors, e.g.:
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-//
-// The other use-case is the specification of constant label pairs in Opts or to
-// create a Desc.
-type Labels map[string]string
-
// Desc is the descriptor used by every Prometheus Metric. It is essentially
// the immutable meta-data of a Metric. The normal Metric implementations
// included in this package manage their Desc under the hood. Users only have to
@@ -122,6 +109,12 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
for _, labelName := range labelNames {
labelValues = append(labelValues, constLabels[labelName])
}
+ // Validate the const label values. They can't have a wrong cardinality, so
+ // use in len(labelValues) as expectedNumberOfValues.
+ if err := validateLabelValues(labelValues, len(labelValues)); err != nil {
+ d.err = err
+ return d
+ }
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
@@ -137,6 +130,7 @@ func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *
d.err = errors.New("duplicate label names")
return d
}
+
vh := hashNew()
for _, val := range labelValues {
vh = hashAdd(vh, val)
@@ -193,8 +187,3 @@ func (d *Desc) String() string {
d.variableLabels,
)
}
-
-func checkLabelName(l string) bool {
- return model.LabelName(l).IsValid() &&
- !strings.HasPrefix(l, reservedLabelPrefix)
-}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index 1fdef9e..36ef155 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -26,6 +26,7 @@
// package main
//
// import (
+// "log"
// "net/http"
//
// "github.com/prometheus/client_golang/prometheus"
@@ -59,7 +60,7 @@
// // The Handler function provides a default handler to expose metrics
// // via an HTTP server. "/metrics" is the usual endpoint for that.
// http.Handle("/metrics", promhttp.Handler())
-// log.Fatal(http.ListenAndServe(":8080", nil))
+// log.Fatal(http.ListenAndServe(":8080", nil))
// }
//
//
@@ -144,7 +145,7 @@
// registry.
//
// So far, everything we did operated on the so-called default registry, as it
-// can be found in the global DefaultRegistry variable. With NewRegistry, you
+// can be found in the global DefaultRegisterer variable. With NewRegistry, you
// can create a custom registry, or you can even implement the Registerer or
// Gatherer interfaces yourself. The methods Register and Unregister work in the
// same way on a custom registry as the global functions Register and Unregister
@@ -152,11 +153,11 @@
//
// There are a number of uses for custom registries: You can use registries with
// special properties, see NewPedanticRegistry. You can avoid global state, as
-// it is imposed by the DefaultRegistry. You can use multiple registries at the
-// same time to expose different metrics in different ways. You can use separate
-// registries for testing purposes.
+// it is imposed by the DefaultRegisterer. You can use multiple registries at
+// the same time to expose different metrics in different ways. You can use
+// separate registries for testing purposes.
//
-// Also note that the DefaultRegistry comes registered with a Collector for Go
+// Also note that the DefaultRegisterer comes registered with a Collector for Go
// runtime metrics (via NewGoCollector) and a Collector for process metrics (via
// NewProcessCollector). With a custom registry, you are in control and decide
// yourself about the Collectors to register.
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
index 9ab5a3d..13064da 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go
@@ -63,12 +63,11 @@ func NewGauge(opts GaugeOpts) Gauge {
// (e.g. number of operations queued, partitioned by user and operation
// type). Create instances with NewGaugeVec.
type GaugeVec struct {
- *MetricVec
+ *metricVec
}
// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -77,28 +76,57 @@ func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec {
opts.ConstLabels,
)
return &GaugeVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newValue(desc, GaugeValue, 0, lvs...)
}),
}
}
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Gauge and not a
-// Metric so that no type conversion is required.
+// GetMetricWithLabelValues returns the Gauge for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Gauge is created.
+//
+// It is possible to call this method without using the returned Gauge to only
+// create the new Gauge but leave it at its starting value 0. See also the
+// SummaryVec example.
+//
+// Keeping the Gauge for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Gauge from the GaugeVec. In that case, the
+// Gauge will still exist, but it will not be exported anymore, even if a
+// Gauge with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+ metric, err := m.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
return metric.(Gauge), err
}
return nil, err
}
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Gauge and not a Metric so that no
-// type conversion is required.
+// GetMetricWith returns the Gauge for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Gauge is created. Implications of
+// creating a Gauge without using it and keeping the Gauge for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
+ metric, err := m.metricVec.getMetricWith(labels)
if metric != nil {
return metric.(Gauge), err
}
@@ -110,14 +138,14 @@ func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) {
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Add(42)
func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge {
- return m.MetricVec.WithLabelValues(lvs...).(Gauge)
+ return m.metricVec.withLabelValues(lvs...).(Gauge)
}
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
func (m *GaugeVec) With(labels Labels) Gauge {
- return m.MetricVec.With(labels).(Gauge)
+ return m.metricVec.with(labels).(Gauge)
}
// GaugeFunc is a Gauge whose value is determined at collect time by calling a
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index f967645..096454a 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -11,6 +11,7 @@ type goCollector struct {
goroutinesDesc *Desc
threadsDesc *Desc
gcDesc *Desc
+ goInfoDesc *Desc
// metrics to describe and collect
metrics memStatsMetrics
@@ -26,12 +27,16 @@ func NewGoCollector() Collector {
nil, nil),
threadsDesc: NewDesc(
"go_threads",
- "Number of OS threads created",
+ "Number of OS threads created.",
nil, nil),
gcDesc: NewDesc(
"go_gc_duration_seconds",
"A summary of the GC invocation durations.",
nil, nil),
+ goInfoDesc: NewDesc(
+ "go_info",
+ "Information about the Go environment.",
+ nil, Labels{"version": runtime.Version()}),
metrics: memStatsMetrics{
{
desc: NewDesc(
@@ -239,6 +244,7 @@ func (c *goCollector) Describe(ch chan<- *Desc) {
ch <- c.goroutinesDesc
ch <- c.threadsDesc
ch <- c.gcDesc
+ ch <- c.goInfoDesc
for _, i := range c.metrics {
ch <- i.desc
}
@@ -261,6 +267,8 @@ func (c *goCollector) Collect(ch chan<- Metric) {
quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles)
+ ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
+
ms := &runtime.MemStats{}
runtime.ReadMemStats(ms)
for _, i := range c.metrics {
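
Note: the Go collector now exports a go_info gauge fixed at 1 whose version label carries runtime.Version(). This is the usual "info metric" pattern; a hedged sketch of building the same kind of metric by hand (names are hypothetical):

    infoDesc := prometheus.NewDesc(
        "my_app_info",                          // hypothetical metric name
        "Build information about my_app.",
        nil,                                    // no variable labels
        prometheus.Labels{"version": "v1.2.3"}, // constant label carries the info
    )
    // Emitted from a custom Collector's Collect method:
    // ch <- prometheus.MustNewConstMetric(infoDesc, prometheus.GaugeValue, 1)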
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
index 9719e8f..6cc6e68 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go
@@ -287,12 +287,11 @@ func (h *histogram) Write(out *dto.Metric) error {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewHistogramVec.
type HistogramVec struct {
- *MetricVec
+ *metricVec
}
// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -301,30 +300,60 @@ func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec {
opts.ConstLabels,
)
return &HistogramVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newHistogram(desc, opts, lvs...)
}),
}
}
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Histogram and not a
-// Metric so that no type conversion is required.
-func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Histogram for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Histogram is created.
+//
+// It is possible to call this method without using the returned Histogram to only
+// create the new Histogram but leave it at its starting value, a Histogram without
+// any observations.
+//
+// Keeping the Histogram for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Histogram from the HistogramVec. In that case, the
+// Histogram will still exist, but it will not be exported anymore, even if a
+// Histogram with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ metric, err := m.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
- return metric.(Histogram), err
+ return metric.(Observer), err
}
return nil, err
}
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Histogram and not a Metric so that no
-// type conversion is required.
-func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Histogram for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Histogram is created. Implications of
+// creating a Histogram without using it and keeping the Histogram for later use
+// are the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (m *HistogramVec) GetMetricWith(labels Labels) (Observer, error) {
+ metric, err := m.metricVec.getMetricWith(labels)
if metric != nil {
- return metric.(Histogram), err
+ return metric.(Observer), err
}
return nil, err
}
@@ -333,15 +362,15 @@ func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) {
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram {
- return m.MetricVec.WithLabelValues(lvs...).(Histogram)
+func (m *HistogramVec) WithLabelValues(lvs ...string) Observer {
+ return m.metricVec.withLabelValues(lvs...).(Observer)
}
// With works as GetMetricWith, but panics where GetMetricWith would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (m *HistogramVec) With(labels Labels) Histogram {
- return m.MetricVec.With(labels).(Histogram)
+func (m *HistogramVec) With(labels Labels) Observer {
+ return m.metricVec.with(labels).(Observer)
}
type constHistogram struct {
@@ -401,8 +430,8 @@ func NewConstHistogram(
buckets map[float64]uint64,
labelValues ...string,
) (Metric, error) {
- if len(desc.variableLabels) != len(labelValues) {
- return nil, errInconsistentCardinality
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
}
return &constHistogram{
desc: desc,
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go
index d74fb48..bfee5c6 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go
@@ -62,7 +62,8 @@ func giveBuf(buf *bytes.Buffer) {
//
// Deprecated: Please note the issues described in the doc comment of
// InstrumentHandler. You might want to consider using promhttp.Handler instead
-// (which is not instrumented).
+// (which is not instrumented, but can be instrumented with the tooling provided
+// in package promhttp).
func Handler() http.Handler {
return InstrumentHandler("prometheus", UninstrumentedHandler())
}
@@ -95,7 +96,7 @@ func UninstrumentedHandler() http.Handler {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
- http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
@@ -158,7 +159,8 @@ func nowSeries(t ...time.Time) nower {
// value. http_requests_total is a metric vector partitioned by HTTP method
// (label name "method") and HTTP status code (label name "code").
//
-// Deprecated: InstrumentHandler has several issues:
+// Deprecated: InstrumentHandler has several issues. Use the tooling provided in
+// package promhttp instead. The issues are the following:
//
// - It uses Summaries rather than Histograms. Summaries are not useful if
// aggregation across multiple instances is required.
@@ -174,10 +176,6 @@ func nowSeries(t ...time.Time) nower {
//
// - It has additional issues with HTTP/2, cf.
// https://github.com/prometheus/client_golang/issues/272.
-//
-// Upcoming versions of this package will provide ways of instrumenting HTTP
-// handlers that are more flexible and have fewer issues. Please prefer direct
-// instrumentation in the meantime.
func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFunc(handlerName, handler.ServeHTTP)
}
@@ -187,7 +185,7 @@ func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFun
// issues).
//
// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as
-// InstrumentHandler is.
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(
SummaryOpts{
@@ -226,7 +224,7 @@ func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWri
// SummaryOpts.
//
// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as
-// InstrumentHandler is.
+// InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc {
return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP)
}
@@ -237,7 +235,7 @@ func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.Hand
// SummaryOpts are used.
//
// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons
-// as InstrumentHandler is.
+// as InstrumentHandler is. Use the tooling provided in package promhttp instead.
func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc {
reqCnt := NewCounterVec(
CounterOpts{
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/labels.go b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
new file mode 100644
index 0000000..2502e37
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/labels.go
@@ -0,0 +1,57 @@
+package prometheus
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "unicode/utf8"
+
+ "github.com/prometheus/common/model"
+)
+
+// Labels represents a collection of label name -> value mappings. This type is
+// commonly used with the With(Labels) and GetMetricWith(Labels) methods of
+// metric vector Collectors, e.g.:
+// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
+//
+// The other use-case is the specification of constant label pairs in Opts or to
+// create a Desc.
+type Labels map[string]string
+
+// reservedLabelPrefix is a prefix which is not legal in user-supplied
+// label names.
+const reservedLabelPrefix = "__"
+
+var errInconsistentCardinality = errors.New("inconsistent label cardinality")
+
+func validateValuesInLabels(labels Labels, expectedNumberOfValues int) error {
+ if len(labels) != expectedNumberOfValues {
+ return errInconsistentCardinality
+ }
+
+ for name, val := range labels {
+ if !utf8.ValidString(val) {
+ return fmt.Errorf("label %s: value %q is not valid UTF-8", name, val)
+ }
+ }
+
+ return nil
+}
+
+func validateLabelValues(vals []string, expectedNumberOfValues int) error {
+ if len(vals) != expectedNumberOfValues {
+ return errInconsistentCardinality
+ }
+
+ for _, val := range vals {
+ if !utf8.ValidString(val) {
+ return fmt.Errorf("label value %q is not valid UTF-8", val)
+ }
+ }
+
+ return nil
+}
+
+func checkLabelName(l string) bool {
+ return model.LabelName(l).IsValid() && !strings.HasPrefix(l, reservedLabelPrefix)
+}
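These helpers centralize the cardinality check and add UTF-8 validation of label values; NewConstHistogram above is one caller. A minimal sketch of the user-visible effect, assuming an illustrative metric name and label set:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	desc := prometheus.NewDesc(
		"http_request_duration_seconds",
		"Request latency histogram used for the sketch.",
		[]string{"handler"}, nil,
	)

	// One variable label but two label values: the constructor now returns
	// the cardinality error instead of a metric.
	_, err := prometheus.NewConstHistogram(
		desc,
		42,   // observation count
		10.5, // sum of observations
		map[float64]uint64{0.1: 10, 1: 40}, // cumulative bucket counts
		"search", "extra",
	)
	fmt.Println(err) // "inconsistent label cardinality"
}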
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/observer.go b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
new file mode 100644
index 0000000..b0520e8
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/observer.go
@@ -0,0 +1,50 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+// Observer is the interface that wraps the Observe method, which is used by
+// Histogram and Summary to add observations.
+type Observer interface {
+ Observe(float64)
+}
+
+// The ObserverFunc type is an adapter to allow the use of ordinary
+// functions as Observers. If f is a function with the appropriate
+// signature, ObserverFunc(f) is an Observer that calls f.
+//
+// This adapter is usually used in connection with the Timer type, and there are
+// two general use cases:
+//
+// The most common one is to use a Gauge as the Observer for a Timer.
+// See the "Gauge" Timer example.
+//
+// The more advanced use case is to create a function that dynamically decides
+// which Observer to use for observing the duration. See the "Complex" Timer
+// example.
+type ObserverFunc func(float64)
+
+// Observe calls f(value). It implements Observer.
+func (f ObserverFunc) Observe(value float64) {
+ f(value)
+}
+
+// ObserverVec is an interface implemented by `HistogramVec` and `SummaryVec`.
+type ObserverVec interface {
+ GetMetricWith(Labels) (Observer, error)
+ GetMetricWithLabelValues(lvs ...string) (Observer, error)
+ With(Labels) Observer
+ WithLabelValues(...string) Observer
+
+ Collector
+}
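Because Observer is satisfied by Histogram, Summary, and the ObserverFunc adapter alike, code that only needs Observe can stay agnostic about the concrete metric type. A small sketch, not from this patch; the metric names are illustrative:

package main

import "github.com/prometheus/client_golang/prometheus"

// observeLatency only needs the Observer interface, so it works with a
// Histogram, a Summary, or a Gauge wrapped in ObserverFunc alike.
func observeLatency(o prometheus.Observer, seconds float64) {
	o.Observe(seconds)
}

func main() {
	hist := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "request_duration_seconds",
		Help: "Request latency.",
	})
	gauge := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "last_request_duration_seconds",
		Help: "Latency of the most recent request.",
	})

	observeLatency(hist, 0.42)
	observeLatency(prometheus.ObserverFunc(gauge.Set), 0.42)
}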
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
new file mode 100644
index 0000000..5ee095b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator.go
@@ -0,0 +1,199 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "bufio"
+ "io"
+ "net"
+ "net/http"
+)
+
+const (
+ closeNotifier = 1 << iota
+ flusher
+ hijacker
+ readerFrom
+ pusher
+)
+
+type delegator interface {
+ http.ResponseWriter
+
+ Status() int
+ Written() int64
+}
+
+type responseWriterDelegator struct {
+ http.ResponseWriter
+
+ handler, method string
+ status int
+ written int64
+ wroteHeader bool
+ observeWriteHeader func(int)
+}
+
+func (r *responseWriterDelegator) Status() int {
+ return r.status
+}
+
+func (r *responseWriterDelegator) Written() int64 {
+ return r.written
+}
+
+func (r *responseWriterDelegator) WriteHeader(code int) {
+ r.status = code
+ r.wroteHeader = true
+ r.ResponseWriter.WriteHeader(code)
+ if r.observeWriteHeader != nil {
+ r.observeWriteHeader(code)
+ }
+}
+
+func (r *responseWriterDelegator) Write(b []byte) (int, error) {
+ if !r.wroteHeader {
+ r.WriteHeader(http.StatusOK)
+ }
+ n, err := r.ResponseWriter.Write(b)
+ r.written += int64(n)
+ return n, err
+}
+
+type closeNotifierDelegator struct{ *responseWriterDelegator }
+type flusherDelegator struct{ *responseWriterDelegator }
+type hijackerDelegator struct{ *responseWriterDelegator }
+type readerFromDelegator struct{ *responseWriterDelegator }
+
+func (d *closeNotifierDelegator) CloseNotify() <-chan bool {
+ return d.ResponseWriter.(http.CloseNotifier).CloseNotify()
+}
+func (d *flusherDelegator) Flush() {
+ d.ResponseWriter.(http.Flusher).Flush()
+}
+func (d *hijackerDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) {
+ return d.ResponseWriter.(http.Hijacker).Hijack()
+}
+func (d *readerFromDelegator) ReadFrom(re io.Reader) (int64, error) {
+ if !d.wroteHeader {
+ d.WriteHeader(http.StatusOK)
+ }
+ n, err := d.ResponseWriter.(io.ReaderFrom).ReadFrom(re)
+ d.written += n
+ return n, err
+}
+
+var pickDelegator = make([]func(*responseWriterDelegator) delegator, 32)
+
+func init() {
+ // TODO(beorn7): Code generation would help here.
+ pickDelegator[0] = func(d *responseWriterDelegator) delegator { // 0
+ return d
+ }
+ pickDelegator[closeNotifier] = func(d *responseWriterDelegator) delegator { // 1
+ return closeNotifierDelegator{d}
+ }
+ pickDelegator[flusher] = func(d *responseWriterDelegator) delegator { // 2
+ return flusherDelegator{d}
+ }
+ pickDelegator[flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 3
+ return struct {
+ *responseWriterDelegator
+ http.Flusher
+ http.CloseNotifier
+ }{d, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[hijacker] = func(d *responseWriterDelegator) delegator { // 4
+ return hijackerDelegator{d}
+ }
+ pickDelegator[hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 5
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.CloseNotifier
+ }{d, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 6
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.Flusher
+ }{d, &hijackerDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 7
+ return struct {
+ *responseWriterDelegator
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom] = func(d *responseWriterDelegator) delegator { // 8
+ return readerFromDelegator{d}
+ }
+ pickDelegator[readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 9
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.CloseNotifier
+ }{d, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 10
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Flusher
+ }{d, &readerFromDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 11
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Flusher
+ http.CloseNotifier
+ }{d, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 12
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ }{d, &readerFromDelegator{d}, &hijackerDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 13
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.CloseNotifier
+ }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 14
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 15
+ return struct {
+ *responseWriterDelegator
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+}
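The pickDelegator table is indexed by a bitmask: each optional interface the wrapped http.ResponseWriter implements contributes one bit, and the sum selects a pre-built constructor that re-exposes exactly that combination. A standalone sketch of the arithmetic; the constants simply mirror the ones above and the writer combination is hypothetical:

package main

import "fmt"

// The same bit assignments as in delegator.go, repeated here so the sketch
// compiles on its own.
const (
	closeNotifier = 1 << iota // 1
	flusher                   // 2
	hijacker                  // 4
	readerFrom                // 8
	pusher                    // 16
)

func main() {
	// A ResponseWriter that also implements http.Flusher and
	// http.CloseNotifier is assigned id 1+2 = 3, i.e. the table entry that
	// combines *responseWriterDelegator, http.Flusher, and http.CloseNotifier.
	id := closeNotifier + flusher
	fmt.Println(id) // 3
}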
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
new file mode 100644
index 0000000..f4d386f
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_1_8.go
@@ -0,0 +1,181 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+ "io"
+ "net/http"
+)
+
+type pusherDelegator struct{ *responseWriterDelegator }
+
+func (d *pusherDelegator) Push(target string, opts *http.PushOptions) error {
+ return d.ResponseWriter.(http.Pusher).Push(target, opts)
+}
+
+func init() {
+ pickDelegator[pusher] = func(d *responseWriterDelegator) delegator { // 16
+ return pusherDelegator{d}
+ }
+ pickDelegator[pusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 17
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+flusher] = func(d *responseWriterDelegator) delegator { // 18
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Flusher
+ }{d, &pusherDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[pusher+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 19
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Flusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker] = func(d *responseWriterDelegator) delegator { // 20
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ }{d, &pusherDelegator{d}, &hijackerDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 21
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 22
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.Flusher
+ }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+ }
+	pickDelegator[pusher+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 23
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom] = func(d *responseWriterDelegator) delegator { // 24
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+closeNotifier] = func(d *responseWriterDelegator) delegator { // 25
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+flusher] = func(d *responseWriterDelegator) delegator { // 26
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Flusher
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 27
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Flusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker] = func(d *responseWriterDelegator) delegator { // 28
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+closeNotifier] = func(d *responseWriterDelegator) delegator { // 29
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &closeNotifierDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+flusher] = func(d *responseWriterDelegator) delegator { // 30
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}}
+ }
+ pickDelegator[pusher+readerFrom+hijacker+flusher+closeNotifier] = func(d *responseWriterDelegator) delegator { // 31
+ return struct {
+ *responseWriterDelegator
+ http.Pusher
+ io.ReaderFrom
+ http.Hijacker
+ http.Flusher
+ http.CloseNotifier
+ }{d, &pusherDelegator{d}, &readerFromDelegator{d}, &hijackerDelegator{d}, &flusherDelegator{d}, &closeNotifierDelegator{d}}
+ }
+}
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+ d := &responseWriterDelegator{
+ ResponseWriter: w,
+ observeWriteHeader: observeWriteHeaderFunc,
+ }
+
+ id := 0
+ if _, ok := w.(http.CloseNotifier); ok {
+ id += closeNotifier
+ }
+ if _, ok := w.(http.Flusher); ok {
+ id += flusher
+ }
+ if _, ok := w.(http.Hijacker); ok {
+ id += hijacker
+ }
+ if _, ok := w.(io.ReaderFrom); ok {
+ id += readerFrom
+ }
+ if _, ok := w.(http.Pusher); ok {
+ id += pusher
+ }
+
+ return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
new file mode 100644
index 0000000..8bb9b8b
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/delegator_pre_1_8.go
@@ -0,0 +1,44 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !go1.8
+
+package promhttp
+
+import (
+ "io"
+ "net/http"
+)
+
+func newDelegator(w http.ResponseWriter, observeWriteHeaderFunc func(int)) delegator {
+ d := &responseWriterDelegator{
+ ResponseWriter: w,
+ observeWriteHeader: observeWriteHeaderFunc,
+ }
+
+ id := 0
+ if _, ok := w.(http.CloseNotifier); ok {
+ id += closeNotifier
+ }
+ if _, ok := w.(http.Flusher); ok {
+ id += flusher
+ }
+ if _, ok := w.(http.Hijacker); ok {
+ id += hijacker
+ }
+ if _, ok := w.(io.ReaderFrom); ok {
+ id += readerFrom
+ }
+
+ return pickDelegator[id](d)
+}
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
index b6dd5a2..2d67f24 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go
@@ -11,21 +11,24 @@
// See the License for the specific language governing permissions and
// limitations under the License.
-// Copyright (c) 2013, The Prometheus Authors
-// All rights reserved.
+// Package promhttp provides tooling around HTTP servers and clients.
//
-// Use of this source code is governed by a BSD-style license that can be found
-// in the LICENSE file.
-
-// Package promhttp contains functions to create http.Handler instances to
-// expose Prometheus metrics via HTTP. In later versions of this package, it
-// will also contain tooling to instrument instances of http.Handler and
-// http.RoundTripper.
+// First, the package allows the creation of http.Handler instances to expose
+// Prometheus metrics via HTTP. promhttp.Handler acts on the
+// prometheus.DefaultGatherer. With HandlerFor, you can create a handler for a
+// custom registry or anything that implements the Gatherer interface. It also
+// allows the creation of handlers that act differently on errors or that
+// log errors.
+//
+// Second, the package provides tooling to instrument instances of http.Handler
+// via middleware. Middleware wrappers follow the naming scheme
+// InstrumentHandlerX, where X describes the intended use of the middleware.
+// See each function's doc comment for specific details.
//
-// promhttp.Handler acts on the prometheus.DefaultGatherer. With HandlerFor,
-// you can create a handler for a custom registry or anything that implements
-// the Gatherer interface. It also allows to create handlers that act
-// differently on errors or allow to log errors.
+// Finally, the package allows for an http.RoundTripper to be instrumented via
+// middleware. Middleware wrappers follow the naming scheme
+// InstrumentRoundTripperX, where X describes the intended use of the
+// middleware. See each function's doc comment for specific details.
package promhttp
import (
@@ -125,7 +128,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler {
closer.Close()
}
if lastErr != nil && buf.Len() == 0 {
- http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError)
+ http.Error(w, "No metrics encoded, last error:\n\n"+lastErr.Error(), http.StatusInternalServerError)
return
}
header := w.Header()
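Following the rewritten package doc, a minimal server sketch; it is not part of this patch, and the paths, port, and registry contents are illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// The stock handler exposes the default registry.
	http.Handle("/metrics", promhttp.Handler())

	// HandlerFor serves any Gatherer and lets the caller pick the error
	// behavior explicitly.
	reg := prometheus.NewRegistry()
	reg.MustRegister(prometheus.NewGoCollector())
	http.Handle("/custom-metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{
		ErrorHandling: promhttp.ContinueOnError,
	}))

	log.Fatal(http.ListenAndServe(":8080", nil))
}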
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
new file mode 100644
index 0000000..65f9425
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go
@@ -0,0 +1,98 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "net/http"
+ "time"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// The RoundTripperFunc type is an adapter to allow the use of ordinary
+// functions as RoundTrippers. If f is a function with the appropriate
+// signature, RoundTripperFunc(f) is a RoundTripper that calls f.
+type RoundTripperFunc func(req *http.Request) (*http.Response, error)
+
+// RoundTrip implements the RoundTripper interface.
+func (rt RoundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) {
+ return rt(r)
+}
+
+// InstrumentRoundTripperInFlight is a middleware that wraps the provided
+// http.RoundTripper. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.RoundTripper.
+//
+// See the example for InstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripper) RoundTripperFunc {
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ gauge.Inc()
+ defer gauge.Dec()
+ return next.RoundTrip(r)
+ })
+}
+
+// InstrumentRoundTripperCounter is a middleware that wraps the provided
+// http.RoundTripper to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. Partitioning of the CounterVec happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present
+// in the CounterVec. For unpartitioned counting, use a CounterVec with
+// zero labels.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, the Counter
+// is not incremented.
+//
+// See the example for InstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+ code, method := checkLabels(counter)
+
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+ }
+ return resp, err
+ })
+}
+
+// InstrumentRoundTripperDuration is a middleware that wraps the provided
+// http.RoundTripper to observe the request duration with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. The Observe method of the Observer in the ObserverVec
+// is called with the request duration in seconds. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped RoundTripper panics or returns a non-nil error, no values are
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+ code, method := checkLabels(obs)
+
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ start := time.Now()
+ resp, err := next.RoundTrip(r)
+ if err == nil {
+ obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+ }
+ return resp, err
+ })
+}
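The three client middlewares compose by nesting RoundTrippers, and a HistogramVec can be passed wherever an ObserverVec is expected now that its accessors return Observer. A sketch of wrapping http.DefaultTransport, not taken from the package examples; metric names, buckets, and the target URL are illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "client_in_flight_requests",
		Help: "In-flight requests of the instrumented client.",
	})
	counter := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "client_requests_total",
		Help: "Requests of the instrumented client, by code and method.",
	}, []string{"code", "method"})
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "client_request_duration_seconds",
		Help:    "Request latencies of the instrumented client, by method.",
		Buckets: prometheus.DefBuckets,
	}, []string{"method"})
	prometheus.MustRegister(inFlight, counter, duration)

	// Outermost wrapper first: in-flight gauge, then counter, then duration.
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperInFlight(inFlight,
			promhttp.InstrumentRoundTripperCounter(counter,
				promhttp.InstrumentRoundTripperDuration(duration, http.DefaultTransport),
			),
		),
	}

	resp, err := client.Get("http://example.com")
	if err != nil {
		log.Println(err)
		return
	}
	resp.Body.Close()
}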
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
new file mode 100644
index 0000000..0bd80c3
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client_1_8.go
@@ -0,0 +1,144 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.8
+
+package promhttp
+
+import (
+ "context"
+ "crypto/tls"
+ "net/http"
+ "net/http/httptrace"
+ "time"
+)
+
+// InstrumentTrace is used to offer flexibility in instrumenting the available
+// httptrace.ClientTrace hook functions. Each function is passed a float64
+// representing the time in seconds since the start of the http request. A user
+// may choose to use separately buckets Histograms, or implement custom
+// instance labels on a per function basis.
+type InstrumentTrace struct {
+ GotConn func(float64)
+ PutIdleConn func(float64)
+ GotFirstResponseByte func(float64)
+ Got100Continue func(float64)
+ DNSStart func(float64)
+ DNSDone func(float64)
+ ConnectStart func(float64)
+ ConnectDone func(float64)
+ TLSHandshakeStart func(float64)
+ TLSHandshakeDone func(float64)
+ WroteHeaders func(float64)
+ Wait100Continue func(float64)
+ WroteRequest func(float64)
+}
+
+// InstrumentRoundTripperTrace is a middleware that wraps the provided
+// RoundTripper and reports times to hook functions provided in the
+// InstrumentTrace struct. Hook functions that are not present in the provided
+// InstrumentTrace struct are ignored. Times reported to the hook functions are
+// time since the start of the request. Only with Go1.9+, those times are
+// guaranteed to never be negative. (Earlier Go versions are not using a
+// monotonic clock.) Note that partitioning of Histograms is expensive and
+// should be used judiciously.
+//
+// For hook functions that receive an error as an argument, no observations are
+// made in the event of a non-nil error value.
+//
+// See the example for InstrumentRoundTripperDuration for example usage.
+func InstrumentRoundTripperTrace(it *InstrumentTrace, next http.RoundTripper) RoundTripperFunc {
+ return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
+ start := time.Now()
+
+ trace := &httptrace.ClientTrace{
+ GotConn: func(_ httptrace.GotConnInfo) {
+ if it.GotConn != nil {
+ it.GotConn(time.Since(start).Seconds())
+ }
+ },
+ PutIdleConn: func(err error) {
+ if err != nil {
+ return
+ }
+ if it.PutIdleConn != nil {
+ it.PutIdleConn(time.Since(start).Seconds())
+ }
+ },
+ DNSStart: func(_ httptrace.DNSStartInfo) {
+ if it.DNSStart != nil {
+ it.DNSStart(time.Since(start).Seconds())
+ }
+ },
+ DNSDone: func(_ httptrace.DNSDoneInfo) {
+				if it.DNSDone != nil {
+					it.DNSDone(time.Since(start).Seconds())
+ }
+ },
+ ConnectStart: func(_, _ string) {
+ if it.ConnectStart != nil {
+ it.ConnectStart(time.Since(start).Seconds())
+ }
+ },
+ ConnectDone: func(_, _ string, err error) {
+ if err != nil {
+ return
+ }
+ if it.ConnectDone != nil {
+ it.ConnectDone(time.Since(start).Seconds())
+ }
+ },
+ GotFirstResponseByte: func() {
+ if it.GotFirstResponseByte != nil {
+ it.GotFirstResponseByte(time.Since(start).Seconds())
+ }
+ },
+ Got100Continue: func() {
+ if it.Got100Continue != nil {
+ it.Got100Continue(time.Since(start).Seconds())
+ }
+ },
+ TLSHandshakeStart: func() {
+ if it.TLSHandshakeStart != nil {
+ it.TLSHandshakeStart(time.Since(start).Seconds())
+ }
+ },
+ TLSHandshakeDone: func(_ tls.ConnectionState, err error) {
+ if err != nil {
+ return
+ }
+ if it.TLSHandshakeDone != nil {
+ it.TLSHandshakeDone(time.Since(start).Seconds())
+ }
+ },
+ WroteHeaders: func() {
+ if it.WroteHeaders != nil {
+ it.WroteHeaders(time.Since(start).Seconds())
+ }
+ },
+ Wait100Continue: func() {
+ if it.Wait100Continue != nil {
+ it.Wait100Continue(time.Since(start).Seconds())
+ }
+ },
+ WroteRequest: func(_ httptrace.WroteRequestInfo) {
+ if it.WroteRequest != nil {
+ it.WroteRequest(time.Since(start).Seconds())
+ }
+ },
+ }
+ r = r.WithContext(httptrace.WithClientTrace(context.Background(), trace))
+
+ return next.RoundTrip(r)
+ })
+}
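A sketch of wiring two of the trace hooks; it is not taken from the package examples, the metric names and buckets are illustrative, and any request made with the resulting client feeds the two histograms:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	dnsLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "dns_duration_seconds",
		Help:    "Time from request start until DNS resolution finished.",
		Buckets: []float64{.005, .01, .025, .05},
	})
	tlsLatency := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "tls_duration_seconds",
		Help:    "Time from request start until the TLS handshake finished.",
		Buckets: []float64{.05, .1, .25, .5},
	})
	prometheus.MustRegister(dnsLatency, tlsLatency)

	// Hooks left nil are simply skipped by the middleware.
	trace := &promhttp.InstrumentTrace{
		DNSDone:          dnsLatency.Observe,
		TLSHandshakeDone: tlsLatency.Observe,
	}
	client := &http.Client{
		Transport: promhttp.InstrumentRoundTripperTrace(trace, http.DefaultTransport),
	}
	_ = client // requests issued with this client feed the two histograms
}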
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
new file mode 100644
index 0000000..3d145ad
--- /dev/null
+++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go
@@ -0,0 +1,440 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package promhttp
+
+import (
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ dto "github.com/prometheus/client_model/go"
+
+ "github.com/prometheus/client_golang/prometheus"
+)
+
+// magicString is used for the hacky label test in checkLabels. Remove once fixed.
+const magicString = "zZgWfBxLqvG8kc8IMv3POi2Bb0tZI3vAnBx+gBaFi9FyPzB/CzKUer1yufDa"
+
+// InstrumentHandlerInFlight is a middleware that wraps the provided
+// http.Handler. It sets the provided prometheus.Gauge to the number of
+// requests currently handled by the wrapped http.Handler.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handler {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ g.Inc()
+ defer g.Dec()
+ next.ServeHTTP(w, r)
+ })
+}
+
+// InstrumentHandlerDuration is a middleware that wraps the provided
+// http.Handler to observe the request duration with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. The Observe method of the Observer in the ObserverVec
+// is called with the request duration in seconds. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ next.ServeHTTP(w, r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
+ })
+}
+
+// InstrumentHandlerCounter is a middleware that wraps the provided
+// http.Handler to observe the request result with the provided CounterVec.
+// The CounterVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. Partitioning of the CounterVec happens by HTTP status
+// code and/or HTTP method if the respective instance label names are present
+// in the CounterVec. For unpartitioned counting, use a CounterVec with
+// zero labels.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, the Counter is not incremented.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(counter)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ counter.With(labels(code, method, r.Method, d.Status())).Inc()
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ counter.With(labels(code, method, r.Method, 0)).Inc()
+ })
+}
+
+// InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided
+// http.Handler to observe with the provided ObserverVec the request duration
+// until the response headers are written. The ObserverVec must have zero, one,
+// or two labels. The only allowed label names are "code" and "method". The
+// function panics if any other instance labels are provided. The Observe
+// method of the Observer in the ObserverVec is called with the request
+// duration in seconds. Partitioning happens by HTTP status code and/or HTTP
+// method if the respective instance label names are present in the
+// ObserverVec. For unpartitioned observations, use an ObserverVec with zero
+// labels. Note that partitioning of Histograms is expensive and should be used
+// judiciously.
+//
+// If the wrapped Handler panics before calling WriteHeader, no value is
+// reported.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ now := time.Now()
+ d := newDelegator(w, func(status int) {
+ obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
+ })
+ next.ServeHTTP(d, r)
+ })
+}
+
+// InstrumentHandlerRequestSize is a middleware that wraps the provided
+// http.Handler to observe the request size with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. The Observe method of the Observer in the ObserverVec
+// is called with the request size in bytes. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+ code, method := checkLabels(obs)
+
+ if code {
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
+ })
+ }
+
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ next.ServeHTTP(w, r)
+ size := computeApproximateRequestSize(r)
+ obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
+ })
+}
+
+// InstrumentHandlerResponseSize is a middleware that wraps the provided
+// http.Handler to observe the response size with the provided ObserverVec.
+// The ObserverVec must have zero, one, or two labels. The only allowed label
+// names are "code" and "method". The function panics if any other instance
+// labels are provided. The Observe method of the Observer in the ObserverVec
+// is called with the response size in bytes. Partitioning happens by HTTP
+// status code and/or HTTP method if the respective instance label names are
+// present in the ObserverVec. For unpartitioned observations, use an
+// ObserverVec with zero labels. Note that partitioning of Histograms is
+// expensive and should be used judiciously.
+//
+// If the wrapped Handler does not set a status code, a status code of 200 is assumed.
+//
+// If the wrapped Handler panics, no values are reported.
+//
+// See the example for InstrumentHandlerDuration for example usage.
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
+ code, method := checkLabels(obs)
+ return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ d := newDelegator(w, nil)
+ next.ServeHTTP(d, r)
+ obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
+ })
+}
+
+func checkLabels(c prometheus.Collector) (code bool, method bool) {
+ // TODO(beorn7): Remove this hacky way to check for instance labels
+ // once Descriptors can have their dimensionality queried.
+ var (
+ desc *prometheus.Desc
+ pm dto.Metric
+ )
+
+ descc := make(chan *prometheus.Desc, 1)
+ c.Describe(descc)
+
+ select {
+ case desc = <-descc:
+ default:
+ panic("no description provided by collector")
+ }
+ select {
+ case <-descc:
+ panic("more than one description provided by collector")
+ default:
+ }
+
+ close(descc)
+
+ if _, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0); err == nil {
+ return
+ }
+ if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString); err == nil {
+ if err := m.Write(&pm); err != nil {
+ panic("error checking metric for labels")
+ }
+ for _, label := range pm.Label {
+ name, value := label.GetName(), label.GetValue()
+ if value != magicString {
+ continue
+ }
+ switch name {
+ case "code":
+ code = true
+ case "method":
+ method = true
+ default:
+ panic("metric partitioned with non-supported labels")
+ }
+ return
+ }
+ panic("previously set label not found – this must never happen")
+ }
+ if m, err := prometheus.NewConstMetric(desc, prometheus.UntypedValue, 0, magicString, magicString); err == nil {
+ if err := m.Write(&pm); err != nil {
+ panic("error checking metric for labels")
+ }
+ for _, label := range pm.Label {
+ name, value := label.GetName(), label.GetValue()
+ if value != magicString {
+ continue
+ }
+ if name == "code" || name == "method" {
+ continue
+ }
+ panic("metric partitioned with non-supported labels")
+ }
+ code = true
+ method = true
+ return
+ }
+ panic("metric partitioned with non-supported labels")
+}
+
+// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
+// unnecessary allocations on each request.
+var emptyLabels = prometheus.Labels{}
+
+func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+ if !(code || method) {
+ return emptyLabels
+ }
+ labels := prometheus.Labels{}
+
+ if code {
+ labels["code"] = sanitizeCode(status)
+ }
+ if method {
+ labels["method"] = sanitizeMethod(reqMethod)
+ }
+
+ return labels
+}
+
+func computeApproximateRequestSize(r *http.Request) int {
+ s := 0
+ if r.URL != nil {
+ s += len(r.URL.String())
+ }
+
+ s += len(r.Method)
+ s += len(r.Proto)
+ for name, values := range r.Header {
+ s += len(name)
+ for _, value := range values {
+ s += len(value)
+ }
+ }
+ s += len(r.Host)
+
+ // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL.
+
+ if r.ContentLength != -1 {
+ s += int(r.ContentLength)
+ }
+ return s
+}
+
+func sanitizeMethod(m string) string {
+ switch m {
+ case "GET", "get":
+ return "get"
+ case "PUT", "put":
+ return "put"
+ case "HEAD", "head":
+ return "head"
+ case "POST", "post":
+ return "post"
+ case "DELETE", "delete":
+ return "delete"
+ case "CONNECT", "connect":
+ return "connect"
+ case "OPTIONS", "options":
+ return "options"
+ case "NOTIFY", "notify":
+ return "notify"
+ default:
+ return strings.ToLower(m)
+ }
+}
+
+// If the wrapped http.Handler has not set a status code, i.e. the value is
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
+// the stdlib.
+func sanitizeCode(s int) string {
+ switch s {
+ case 100:
+ return "100"
+ case 101:
+ return "101"
+
+ case 200, 0:
+ return "200"
+ case 201:
+ return "201"
+ case 202:
+ return "202"
+ case 203:
+ return "203"
+ case 204:
+ return "204"
+ case 205:
+ return "205"
+ case 206:
+ return "206"
+
+ case 300:
+ return "300"
+ case 301:
+ return "301"
+ case 302:
+ return "302"
+ case 304:
+ return "304"
+ case 305:
+ return "305"
+ case 307:
+ return "307"
+
+ case 400:
+ return "400"
+ case 401:
+ return "401"
+ case 402:
+ return "402"
+ case 403:
+ return "403"
+ case 404:
+ return "404"
+ case 405:
+ return "405"
+ case 406:
+ return "406"
+ case 407:
+ return "407"
+ case 408:
+ return "408"
+ case 409:
+ return "409"
+ case 410:
+ return "410"
+ case 411:
+ return "411"
+ case 412:
+ return "412"
+ case 413:
+ return "413"
+ case 414:
+ return "414"
+ case 415:
+ return "415"
+ case 416:
+ return "416"
+ case 417:
+ return "417"
+ case 418:
+ return "418"
+
+ case 500:
+ return "500"
+ case 501:
+ return "501"
+ case 502:
+ return "502"
+ case 503:
+ return "503"
+ case 504:
+ return "504"
+ case 505:
+ return "505"
+
+ case 428:
+ return "428"
+ case 429:
+ return "429"
+ case 431:
+ return "431"
+ case 511:
+ return "511"
+
+ default:
+ return strconv.Itoa(s)
+ }
+}
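The server-side middlewares compose the same way as the client-side ones. A sketch wrapping a trivial handler, not taken from the package examples; metric names and the port are illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	inFlight := prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "http_in_flight_requests",
		Help: "In-flight requests handled by the instrumented handler.",
	})
	requests := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "http_requests_total",
		Help: "Total requests, partitioned by status code and method.",
	}, []string{"code", "method"})
	duration := prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Name:    "http_request_duration_seconds",
		Help:    "Request latencies, partitioned by method only.",
		Buckets: prometheus.DefBuckets,
	}, []string{"method"})
	prometheus.MustRegister(inFlight, requests, duration)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello"))
	})

	// Chain the middleware around the business handler, innermost first.
	chain := promhttp.InstrumentHandlerInFlight(inFlight,
		promhttp.InstrumentHandlerDuration(duration,
			promhttp.InstrumentHandlerCounter(requests, hello),
		),
	)

	http.Handle("/", chain)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}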
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 8c6b5bd..c84a442 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -20,6 +20,7 @@ import (
"os"
"sort"
"sync"
+ "unicode/utf8"
"github.com/golang/protobuf/proto"
@@ -80,7 +81,7 @@ func NewPedanticRegistry() *Registry {
// Registerer is the interface for the part of a registry in charge of
// registering and unregistering. Users of custom registries should use
-// Registerer as type for registration purposes (rather then the Registry type
+// Registerer as type for registration purposes (rather than the Registry type
// directly). In that way, they are free to use custom Registerer implementation
// (e.g. for testing purposes).
type Registerer interface {
@@ -655,7 +656,7 @@ func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily)
// checkMetricConsistency checks if the provided Metric is consistent with the
// provided MetricFamily. It also hashes the Metric labels and the MetricFamily
-// name. If the resulting hash is alread in the provided metricHashes, an error
+// name. If the resulting hash is already in the provided metricHashes, an error
// is returned. If not, it is added to metricHashes. The provided dimHashes maps
// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes
// doesn't yet contain a hash for the provided MetricFamily, it is
@@ -679,6 +680,12 @@ func checkMetricConsistency(
)
}
+ for _, labelPair := range dtoMetric.GetLabel() {
+ if !utf8.ValidString(*labelPair.Value) {
+ return fmt.Errorf("collected metric's label %s is not utf8: %#v", *labelPair.Name, *labelPair.Value)
+ }
+ }
+
// Is the metric unique (i.e. no other metric with the same name and the same label values)?
h := hashNew()
h = hashAdd(h, metricFamily.GetName())
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
index 82b8850..21c031e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/summary.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go
@@ -36,7 +36,10 @@ const quantileLabel = "quantile"
//
// A typical use-case is the observation of request latencies. By default, a
// Summary provides the median, the 90th and the 99th percentile of the latency
-// as rank estimations.
+// as rank estimations. However, the default behavior will change in the
+// upcoming v0.10 of the library. There will be no rank estimations at all by
+// default. For a sane transition, it is recommended to set the desired rank
+// estimations explicitly.
//
// Note that the rank estimations cannot be aggregated in a meaningful way with
// the Prometheus query language (i.e. you cannot average or add them). If you
@@ -78,8 +81,10 @@ const (
)
// SummaryOpts bundles the options for creating a Summary metric. It is
-// mandatory to set Name and Help to a non-empty string. All other fields are
-// optional and can safely be left at their zero value.
+// mandatory to set Name and Help to a non-empty string. While all other fields
+// are optional and can safely be left at their zero value, it is recommended to
+// explicitly set the Objectives field to the desired value as the default value
+// will change in the upcoming v0.10 of the library.
type SummaryOpts struct {
// Namespace, Subsystem, and Name are components of the fully-qualified
// name of the Summary (created by joining these components with
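Given the announced change of the default Objectives in v0.10, a Summary is best created with its quantiles spelled out. A sketch, not part of this patch; the metric name is illustrative, and the objectives shown are the historically defaulted median, 90th, and 99th percentile with their error windows:

package main

import "github.com/prometheus/client_golang/prometheus"

func main() {
	// Spelling out the objectives keeps behavior stable across the planned
	// v0.10 default change; the map is quantile -> allowed absolute error.
	latency := prometheus.NewSummary(prometheus.SummaryOpts{
		Name:       "rpc_duration_seconds",
		Help:       "RPC latency distribution.",
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	})
	prometheus.MustRegister(latency)
	latency.Observe(0.137)
}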
@@ -399,12 +404,11 @@ func (s quantSort) Less(i, j int) bool {
// (e.g. HTTP request latencies, partitioned by status code and method). Create
// instances with NewSummaryVec.
type SummaryVec struct {
- *MetricVec
+ *metricVec
}
// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
+// partitioned by the given label names.
func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
desc := NewDesc(
BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
@@ -413,30 +417,60 @@ func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec {
opts.ConstLabels,
)
return &SummaryVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
+ metricVec: newMetricVec(desc, func(lvs ...string) Metric {
return newSummary(desc, opts, lvs...)
}),
}
}
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns a Summary and not a
-// Metric so that no type conversion is required.
-func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
+// GetMetricWithLabelValues returns the Summary for the given slice of label
+// values (same order as the VariableLabels in Desc). If that combination of
+// label values is accessed for the first time, a new Summary is created.
+//
+// It is possible to call this method without using the returned Summary to only
+// create the new Summary but leave it at its starting value, a Summary without
+// any observations.
+//
+// Keeping the Summary for later use is possible (and should be considered if
+// performance is critical), but keep in mind that Reset, DeleteLabelValues and
+// Delete can be used to delete the Summary from the SummaryVec. In that case, the
+// Summary will still exist, but it will not be exported anymore, even if a
+// Summary with the same label values is created later. See also the CounterVec
+// example.
+//
+// An error is returned if the number of label values is not the same as the
+// number of VariableLabels in Desc.
+//
+// Note that for more than one label value, this method is prone to mistakes
+// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
+// an alternative to avoid that type of mistake. For higher label numbers, the
+// latter has a much more readable (albeit more verbose) syntax, but it comes
+// with a performance overhead (for creating and processing the Labels map).
+// See also the GaugeVec example.
+func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Observer, error) {
+ metric, err := m.metricVec.getMetricWithLabelValues(lvs...)
if metric != nil {
- return metric.(Summary), err
+ return metric.(Observer), err
}
return nil, err
}
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns a Summary and not a Metric so that no
-// type conversion is required.
-func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
+// GetMetricWith returns the Summary for the given Labels map (the label names
+// must match those of the VariableLabels in Desc). If that label map is
+// accessed for the first time, a new Summary is created. Implications of
+// creating a Summary without using it and keeping the Summary for later use are
+// the same as for GetMetricWithLabelValues.
+//
+// An error is returned if the number and names of the Labels are inconsistent
+// with those of the VariableLabels in Desc.
+//
+// This method is used for the same purpose as
+// GetMetricWithLabelValues(...string). See there for pros and cons of the two
+// methods.
+func (m *SummaryVec) GetMetricWith(labels Labels) (Observer, error) {
+ metric, err := m.metricVec.getMetricWith(labels)
if metric != nil {
- return metric.(Summary), err
+ return metric.(Observer), err
}
return nil, err
}
@@ -445,15 +479,15 @@ func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) {
// GetMetricWithLabelValues would have returned an error. By not returning an
// error, WithLabelValues allows shortcuts like
// myVec.WithLabelValues("404", "GET").Observe(42.21)
-func (m *SummaryVec) WithLabelValues(lvs ...string) Summary {
- return m.MetricVec.WithLabelValues(lvs...).(Summary)
+func (m *SummaryVec) WithLabelValues(lvs ...string) Observer {
+ return m.metricVec.withLabelValues(lvs...).(Observer)
}
// With works as GetMetricWith, but panics where GetMetricWithLabels would have
// returned an error. By not returning an error, With allows shortcuts like
// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21)
-func (m *SummaryVec) With(labels Labels) Summary {
- return m.MetricVec.With(labels).(Summary)
+func (m *SummaryVec) With(labels Labels) Observer {
+ return m.metricVec.with(labels).(Observer)
}
type constSummary struct {
@@ -514,8 +548,8 @@ func NewConstSummary(
quantiles map[float64]float64,
labelValues ...string,
) (Metric, error) {
- if len(desc.variableLabels) != len(labelValues) {
- return nil, errInconsistentCardinality
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
}
return &constSummary{
desc: desc,
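
The hunks above change SummaryVec to return Observer and recommend setting Objectives explicitly. A minimal sketch of that usage, assuming the client_golang import path; the metric name and label are illustrative:

import "github.com/prometheus/client_golang/prometheus"

var reqDuration = prometheus.NewSummaryVec(
	prometheus.SummaryOpts{
		Name: "http_request_duration_seconds",
		Help: "Request latencies by method.",
		// Set Objectives explicitly; the default rank estimations are
		// scheduled to change in v0.10.
		Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
	},
	[]string{"method"},
)

func init() {
	prometheus.MustRegister(reqDuration)
}

func recordRequest(seconds float64) {
	// WithLabelValues now returns an Observer rather than a concrete Summary.
	reqDuration.WithLabelValues("GET").Observe(seconds)
}
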
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/timer.go b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
index f4cac5a..b8fc5f1 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/timer.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/timer.go
@@ -15,32 +15,6 @@ package prometheus
import "time"
-// Observer is the interface that wraps the Observe method, which is used by
-// Histogram and Summary to add observations.
-type Observer interface {
- Observe(float64)
-}
-
-// The ObserverFunc type is an adapter to allow the use of ordinary
-// functions as Observers. If f is a function with the appropriate
-// signature, ObserverFunc(f) is an Observer that calls f.
-//
-// This adapter is usually used in connection with the Timer type, and there are
-// two general use cases:
-//
-// The most common one is to use a Gauge as the Observer for a Timer.
-// See the "Gauge" Timer example.
-//
-// The more advanced use case is to create a function that dynamically decides
-// which Observer to use for observing the duration. See the "Complex" Timer
-// example.
-type ObserverFunc func(float64)
-
-// Observe calls f(value). It implements Observer.
-func (f ObserverFunc) Observe(value float64) {
- f(value)
-}
-
// Timer is a helper type to time functions. Use NewTimer to create new
// instances.
type Timer struct {
@@ -67,6 +41,9 @@ func NewTimer(o Observer) *Timer {
// NewTimer. It calls the Observe method of the Observer provided during
// construction with the duration in seconds as an argument. ObserveDuration is
// usually called with a defer statement.
+//
+// Note that this method is only guaranteed to never observe negative durations
+// if used with Go1.9+.
func (t *Timer) ObserveDuration() {
if t.observer != nil {
t.observer.Observe(time.Since(t.begin).Seconds())
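
A short sketch of the usual defer pattern for ObserveDuration, reusing the SummaryVec from the previous sketch (any Observer works) and assuming the time import; the sleep stands in for real work:

func timedHandler() {
	timer := prometheus.NewTimer(reqDuration.WithLabelValues("GET"))
	// ObserveDuration records the elapsed time in seconds when the function
	// returns; non-negative durations are only guaranteed on Go 1.9+.
	defer timer.ObserveDuration()

	time.Sleep(10 * time.Millisecond) // stand-in for real work
}
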
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
index 065501d..0f9ce63 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go
@@ -13,113 +13,12 @@
package prometheus
-// Untyped is a Metric that represents a single numerical value that can
-// arbitrarily go up and down.
-//
-// An Untyped metric works the same as a Gauge. The only difference is that to
-// no type information is implied.
-//
-// To create Untyped instances, use NewUntyped.
-//
-// Deprecated: The Untyped type is deprecated because it doesn't make sense in
-// direct instrumentation. If you need to mirror an external metric of unknown
-// type (usually while writing exporters), Use MustNewConstMetric to create an
-// untyped metric instance on the fly.
-type Untyped interface {
- Metric
- Collector
-
- // Set sets the Untyped metric to an arbitrary value.
- Set(float64)
- // Inc increments the Untyped metric by 1.
- Inc()
- // Dec decrements the Untyped metric by 1.
- Dec()
- // Add adds the given value to the Untyped metric. (The value can be
- // negative, resulting in a decrease.)
- Add(float64)
- // Sub subtracts the given value from the Untyped metric. (The value can
- // be negative, resulting in an increase.)
- Sub(float64)
-}
-
// UntypedOpts is an alias for Opts. See there for doc comments.
type UntypedOpts Opts
-// NewUntyped creates a new Untyped metric from the provided UntypedOpts.
-func NewUntyped(opts UntypedOpts) Untyped {
- return newValue(NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- nil,
- opts.ConstLabels,
- ), UntypedValue, 0)
-}
-
-// UntypedVec is a Collector that bundles a set of Untyped metrics that all
-// share the same Desc, but have different values for their variable
-// labels. This is used if you want to count the same thing partitioned by
-// various dimensions. Create instances with NewUntypedVec.
-type UntypedVec struct {
- *MetricVec
-}
-
-// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and
-// partitioned by the given label names. At least one label name must be
-// provided.
-func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec {
- desc := NewDesc(
- BuildFQName(opts.Namespace, opts.Subsystem, opts.Name),
- opts.Help,
- labelNames,
- opts.ConstLabels,
- )
- return &UntypedVec{
- MetricVec: newMetricVec(desc, func(lvs ...string) Metric {
- return newValue(desc, UntypedValue, 0, lvs...)
- }),
- }
-}
-
-// GetMetricWithLabelValues replaces the method of the same name in
-// MetricVec. The difference is that this method returns an Untyped and not a
-// Metric so that no type conversion is required.
-func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) {
- metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...)
- if metric != nil {
- return metric.(Untyped), err
- }
- return nil, err
-}
-
-// GetMetricWith replaces the method of the same name in MetricVec. The
-// difference is that this method returns an Untyped and not a Metric so that no
-// type conversion is required.
-func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) {
- metric, err := m.MetricVec.GetMetricWith(labels)
- if metric != nil {
- return metric.(Untyped), err
- }
- return nil, err
-}
-
-// WithLabelValues works as GetMetricWithLabelValues, but panics where
-// GetMetricWithLabelValues would have returned an error. By not returning an
-// error, WithLabelValues allows shortcuts like
-// myVec.WithLabelValues("404", "GET").Add(42)
-func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped {
- return m.MetricVec.WithLabelValues(lvs...).(Untyped)
-}
-
-// With works as GetMetricWith, but panics where GetMetricWithLabels would have
-// returned an error. By not returning an error, With allows shortcuts like
-// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42)
-func (m *UntypedVec) With(labels Labels) Untyped {
- return m.MetricVec.With(labels).(Untyped)
-}
-
-// UntypedFunc is an Untyped whose value is determined at collect time by
-// calling a provided function.
+// UntypedFunc works like GaugeFunc but the collected metric is of type
+// "Untyped". UntypedFunc is useful to mirror an external metric of unknown
+// type.
//
// To create UntypedFunc instances, use NewUntypedFunc.
type UntypedFunc interface {
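
A rough sketch of the mirroring use case named in the comment above; readExternalValue is a hypothetical helper that fetches the external value at collect time:

var externalMirror = prometheus.NewUntypedFunc(
	prometheus.UntypedOpts{
		Name: "external_mirror_value",
		Help: "Value mirrored from an external system of unknown type.",
	},
	func() float64 {
		return readExternalValue() // hypothetical: read the external value
	},
)

// Registered like any other collector: prometheus.MustRegister(externalMirror)
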
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index ff75ce5..4a9cca6 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -14,7 +14,6 @@
package prometheus
import (
- "errors"
"fmt"
"math"
"sort"
@@ -37,8 +36,6 @@ const (
UntypedValue
)
-var errInconsistentCardinality = errors.New("inconsistent label cardinality")
-
// value is a generic metric for simple values. It implements Metric, Collector,
// Counter, Gauge, and Untyped. Its effective type is determined by
// ValueType. This is a low-level building block used by the library to back the
@@ -158,8 +155,8 @@ func (v *valueFunc) Write(out *dto.Metric) error {
// the Collect method. NewConstMetric returns an error if the length of
// labelValues is not consistent with the variable labels in Desc.
func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) {
- if len(desc.variableLabels) != len(labelValues) {
- return nil, errInconsistentCardinality
+ if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
+ return nil, err
}
return &constMetric{
desc: desc,
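
A minimal sketch of the NewConstMetric path touched above, following the usual custom-collector pattern and assuming the prometheus and log imports; the descriptor and value are illustrative:

var queueLenDesc = prometheus.NewDesc(
	"queue_length", "Current length of a queue.",
	[]string{"queue"}, nil,
)

func collectQueueLength(ch chan<- prometheus.Metric, n float64) {
	// NewConstMetric now reports an error from validateLabelValues when the
	// label values are inconsistent with the Desc.
	m, err := prometheus.NewConstMetric(queueLenDesc, prometheus.GaugeValue, n, "jobs")
	if err != nil {
		log.Println(err)
		return
	}
	ch <- m
}
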
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
index 7f3eef9..65d13fe 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/vec.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go
@@ -20,12 +20,12 @@ import (
"github.com/prometheus/common/model"
)
-// MetricVec is a Collector to bundle metrics of the same name that
-// differ in their label values. MetricVec is usually not used directly but as a
-// building block for implementations of vectors of a given metric
-// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already
-// provided in this package.
-type MetricVec struct {
+// metricVec is a Collector to bundle metrics of the same name that differ in
+// their label values. metricVec is not used directly (and therefore
+// unexported). It is used as a building block for implementations of vectors of
+// a given metric type, like GaugeVec, CounterVec, SummaryVec, HistogramVec, and
+// UntypedVec.
+type metricVec struct {
mtx sync.RWMutex // Protects the children.
children map[uint64][]metricWithLabelValues
desc *Desc
@@ -35,10 +35,9 @@ type MetricVec struct {
hashAddByte func(h uint64, b byte) uint64
}
-// newMetricVec returns an initialized MetricVec. The concrete value is
-// returned for embedding into another struct.
-func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
- return &MetricVec{
+// newMetricVec returns an initialized metricVec.
+func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *metricVec {
+ return &metricVec{
children: map[uint64][]metricWithLabelValues{},
desc: desc,
newMetric: newMetric,
@@ -56,12 +55,12 @@ type metricWithLabelValues struct {
// Describe implements Collector. The length of the returned slice
// is always one.
-func (m *MetricVec) Describe(ch chan<- *Desc) {
+func (m *metricVec) Describe(ch chan<- *Desc) {
ch <- m.desc
}
// Collect implements Collector.
-func (m *MetricVec) Collect(ch chan<- Metric) {
+func (m *metricVec) Collect(ch chan<- Metric) {
m.mtx.RLock()
defer m.mtx.RUnlock()
@@ -72,31 +71,7 @@ func (m *MetricVec) Collect(ch chan<- Metric) {
}
}
-// GetMetricWithLabelValues returns the Metric for the given slice of label
-// values (same order as the VariableLabels in Desc). If that combination of
-// label values is accessed for the first time, a new Metric is created.
-//
-// It is possible to call this method without using the returned Metric to only
-// create the new Metric but leave it at its start value (e.g. a Summary or
-// Histogram without any observations). See also the SummaryVec example.
-//
-// Keeping the Metric for later use is possible (and should be considered if
-// performance is critical), but keep in mind that Reset, DeleteLabelValues and
-// Delete can be used to delete the Metric from the MetricVec. In that case, the
-// Metric will still exist, but it will not be exported anymore, even if a
-// Metric with the same label values is created later. See also the CounterVec
-// example.
-//
-// An error is returned if the number of label values is not the same as the
-// number of VariableLabels in Desc.
-//
-// Note that for more than one label value, this method is prone to mistakes
-// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as
-// an alternative to avoid that type of mistake. For higher label numbers, the
-// latter has a much more readable (albeit more verbose) syntax, but it comes
-// with a performance overhead (for creating and processing the Labels map).
-// See also the GaugeVec example.
-func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
+func (m *metricVec) getMetricWithLabelValues(lvs ...string) (Metric, error) {
h, err := m.hashLabelValues(lvs)
if err != nil {
return nil, err
@@ -105,19 +80,7 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
return m.getOrCreateMetricWithLabelValues(h, lvs), nil
}
-// GetMetricWith returns the Metric for the given Labels map (the label names
-// must match those of the VariableLabels in Desc). If that label map is
-// accessed for the first time, a new Metric is created. Implications of
-// creating a Metric without using it and keeping the Metric for later use are
-// the same as for GetMetricWithLabelValues.
-//
-// An error is returned if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in Desc.
-//
-// This method is used for the same purpose as
-// GetMetricWithLabelValues(...string). See there for pros and cons of the two
-// methods.
-func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
+func (m *metricVec) getMetricWith(labels Labels) (Metric, error) {
h, err := m.hashLabels(labels)
if err != nil {
return nil, err
@@ -126,22 +89,16 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
return m.getOrCreateMetricWithLabels(h, labels), nil
}
-// WithLabelValues works as GetMetricWithLabelValues, but panics if an error
-// occurs. The method allows neat syntax like:
-// httpReqs.WithLabelValues("404", "POST").Inc()
-func (m *MetricVec) WithLabelValues(lvs ...string) Metric {
- metric, err := m.GetMetricWithLabelValues(lvs...)
+func (m *metricVec) withLabelValues(lvs ...string) Metric {
+ metric, err := m.getMetricWithLabelValues(lvs...)
if err != nil {
panic(err)
}
return metric
}
-// With works as GetMetricWith, but panics if an error occurs. The method allows
-// neat syntax like:
-// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc()
-func (m *MetricVec) With(labels Labels) Metric {
- metric, err := m.GetMetricWith(labels)
+func (m *metricVec) with(labels Labels) Metric {
+ metric, err := m.getMetricWith(labels)
if err != nil {
panic(err)
}
@@ -153,8 +110,8 @@ func (m *MetricVec) With(labels Labels) Metric {
// returns true if a metric was deleted.
//
// It is not an error if the number of label values is not the same as the
-// number of VariableLabels in Desc. However, such inconsistent label count can
-// never match an actual Metric, so the method will always return false in that
+// number of VariableLabels in Desc. However, such inconsistent label count can
+// never match an actual metric, so the method will always return false in that
// case.
//
// Note that for more than one label value, this method is prone to mistakes
@@ -163,7 +120,7 @@ func (m *MetricVec) With(labels Labels) Metric {
// latter has a much more readable (albeit more verbose) syntax, but it comes
// with a performance overhead (for creating and processing the Labels map).
// See also the CounterVec example.
-func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
+func (m *metricVec) DeleteLabelValues(lvs ...string) bool {
m.mtx.Lock()
defer m.mtx.Unlock()
@@ -178,13 +135,13 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
// passed in as labels. It returns true if a metric was deleted.
//
// It is not an error if the number and names of the Labels are inconsistent
-// with those of the VariableLabels in the Desc of the MetricVec. However, such
-// inconsistent Labels can never match an actual Metric, so the method will
-// always return false in that case.
+// with those of the VariableLabels in Desc. However, such inconsistent Labels
+// can never match an actual metric, so the method will always return false in
+// that case.
//
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
-func (m *MetricVec) Delete(labels Labels) bool {
+func (m *metricVec) Delete(labels Labels) bool {
m.mtx.Lock()
defer m.mtx.Unlock()
@@ -199,7 +156,7 @@ func (m *MetricVec) Delete(labels Labels) bool {
// deleteByHashWithLabelValues removes the metric from the hash bucket h. If
// there are multiple matches in the bucket, use lvs to select a metric and
// remove only that metric.
-func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
+func (m *metricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
metrics, ok := m.children[h]
if !ok {
return false
@@ -221,7 +178,7 @@ func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool {
// deleteByHashWithLabels removes the metric from the hash bucket h. If there
// are multiple matches in the bucket, use lvs to select a metric and remove
// only that metric.
-func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
+func (m *metricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
metrics, ok := m.children[h]
if !ok {
return false
@@ -240,7 +197,7 @@ func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool {
}
// Reset deletes all metrics in this vector.
-func (m *MetricVec) Reset() {
+func (m *metricVec) Reset() {
m.mtx.Lock()
defer m.mtx.Unlock()
@@ -249,10 +206,11 @@ func (m *MetricVec) Reset() {
}
}
-func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
- if len(vals) != len(m.desc.variableLabels) {
- return 0, errInconsistentCardinality
+func (m *metricVec) hashLabelValues(vals []string) (uint64, error) {
+ if err := validateLabelValues(vals, len(m.desc.variableLabels)); err != nil {
+ return 0, err
}
+
h := hashNew()
for _, val := range vals {
h = m.hashAdd(h, val)
@@ -261,10 +219,11 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
return h, nil
}
-func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
- if len(labels) != len(m.desc.variableLabels) {
- return 0, errInconsistentCardinality
+func (m *metricVec) hashLabels(labels Labels) (uint64, error) {
+ if err := validateValuesInLabels(labels, len(m.desc.variableLabels)); err != nil {
+ return 0, err
}
+
h := hashNew()
for _, label := range m.desc.variableLabels {
val, ok := labels[label]
@@ -281,9 +240,9 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
// or creates it and returns the new one.
//
// This function holds the mutex.
-func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
+func (m *metricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric {
m.mtx.RLock()
- metric, ok := m.getMetricWithLabelValues(hash, lvs)
+ metric, ok := m.getMetricWithHashAndLabelValues(hash, lvs)
m.mtx.RUnlock()
if ok {
return metric
@@ -291,7 +250,7 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string)
m.mtx.Lock()
defer m.mtx.Unlock()
- metric, ok = m.getMetricWithLabelValues(hash, lvs)
+ metric, ok = m.getMetricWithHashAndLabelValues(hash, lvs)
if !ok {
		// Copy to avoid allocation in case we don't go down this code path.
copiedLVs := make([]string, len(lvs))
@@ -306,9 +265,9 @@ func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string)
// or creates it and returns the new one.
//
// This function holds the mutex.
-func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
+func (m *metricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric {
m.mtx.RLock()
- metric, ok := m.getMetricWithLabels(hash, labels)
+ metric, ok := m.getMetricWithHashAndLabels(hash, labels)
m.mtx.RUnlock()
if ok {
return metric
@@ -316,7 +275,7 @@ func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metr
m.mtx.Lock()
defer m.mtx.Unlock()
- metric, ok = m.getMetricWithLabels(hash, labels)
+ metric, ok = m.getMetricWithHashAndLabels(hash, labels)
if !ok {
lvs := m.extractLabelValues(labels)
metric = m.newMetric(lvs...)
@@ -325,9 +284,9 @@ func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metr
return metric
}
-// getMetricWithLabelValues gets a metric while handling possible collisions in
-// the hash space. Must be called while holding read mutex.
-func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) {
+// getMetricWithHashAndLabelValues gets a metric while handling possible
+// collisions in the hash space. Must be called while holding the read mutex.
+func (m *metricVec) getMetricWithHashAndLabelValues(h uint64, lvs []string) (Metric, bool) {
metrics, ok := m.children[h]
if ok {
if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) {
@@ -337,9 +296,9 @@ func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bo
return nil, false
}
-// getMetricWithLabels gets a metric while handling possible collisions in
+// getMetricWithHashAndLabels gets a metric while handling possible collisions in
// the hash space. Must be called while holding read mutex.
-func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) {
+func (m *metricVec) getMetricWithHashAndLabels(h uint64, labels Labels) (Metric, bool) {
metrics, ok := m.children[h]
if ok {
if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) {
@@ -351,7 +310,7 @@ func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool)
// findMetricWithLabelValues returns the index of the matching metric or
// len(metrics) if not found.
-func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
+func (m *metricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int {
for i, metric := range metrics {
if m.matchLabelValues(metric.values, lvs) {
return i
@@ -362,7 +321,7 @@ func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, l
// findMetricWithLabels returns the index of the matching metric or len(metrics)
// if not found.
-func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
+func (m *metricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int {
for i, metric := range metrics {
if m.matchLabels(metric.values, labels) {
return i
@@ -371,7 +330,7 @@ func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels
return len(metrics)
}
-func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
+func (m *metricVec) matchLabelValues(values []string, lvs []string) bool {
if len(values) != len(lvs) {
return false
}
@@ -383,7 +342,7 @@ func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool {
return true
}
-func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
+func (m *metricVec) matchLabels(values []string, labels Labels) bool {
if len(labels) != len(values) {
return false
}
@@ -395,7 +354,7 @@ func (m *MetricVec) matchLabels(values []string, labels Labels) bool {
return true
}
-func (m *MetricVec) extractLabelValues(labels Labels) []string {
+func (m *metricVec) extractLabelValues(labels Labels) []string {
labelValues := make([]string, len(labels))
for i, k := range m.desc.variableLabels {
labelValues[i] = labels[k]
diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go
index ef9a150..54bcfde 100644
--- a/vendor/github.com/prometheus/common/expfmt/text_parse.go
+++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go
@@ -315,6 +315,10 @@ func (p *TextParser) startLabelValue() stateFn {
if p.readTokenAsLabelValue(); p.err != nil {
return nil
}
+ if !model.LabelValue(p.currentToken.String()).IsValid() {
+ p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String()))
+ return nil
+ }
p.currentLabelPair.Value = proto.String(p.currentToken.String())
// Special treatment of summaries:
// - Quantile labels are special, will result in dto.Quantile later.
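
A small sketch of the effect of this check, assuming the expfmt and strings imports: a label value that is not valid UTF-8 now fails the parse instead of being passed through.

func parseExample() error {
	var parser expfmt.TextParser
	_, err := parser.TextToMetricFamilies(strings.NewReader(
		"some_metric{label=\"\xff\"} 1\n"))
	// err is non-nil: "\xff" is not valid UTF-8, so startLabelValue reports
	// an invalid label value.
	return err
}
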
diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go
index 548968a..7e87f1a 100644
--- a/vendor/github.com/prometheus/common/model/time.go
+++ b/vendor/github.com/prometheus/common/model/time.go
@@ -163,9 +163,21 @@ func (t *Time) UnmarshalJSON(b []byte) error {
// This type should not propagate beyond the scope of input/output processing.
type Duration time.Duration
+// Set implements pflag/flag.Value
+func (d *Duration) Set(s string) error {
+ var err error
+ *d, err = ParseDuration(s)
+ return err
+}
+
+// Type implements pflag.Value
+func (d *Duration) Type() string {
+ return "duration"
+}
+
var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$")
-// StringToDuration parses a string into a time.Duration, assuming that a year
+// ParseDuration parses a string into a time.Duration, assuming that a year
// always has 365d, a week always has 7d, and a day always has 24h.
func ParseDuration(durationStr string) (Duration, error) {
matches := durationRE.FindStringSubmatch(durationStr)
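
A brief sketch of the new methods, assuming the model, fmt, log, and time imports: ParseDuration accepts a single number-plus-unit string, and Set/Type make *Duration usable as a flag or pflag value.

func durationExample() {
	d, err := model.ParseDuration("90m")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(time.Duration(d)) // 1h30m0s

	// Set implements flag.Value/pflag.Value, so *Duration works as a flag:
	var timeout model.Duration
	if err := timeout.Set("1d"); err != nil { // a day is always 24h in this format
		log.Fatal(err)
	}
}
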
diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile
index c264a49..dd48afd 100644
--- a/vendor/github.com/prometheus/procfs/Makefile
+++ b/vendor/github.com/prometheus/procfs/Makefile
@@ -1,6 +1,18 @@
-ci:
+ci: fmt lint test
+
+fmt:
! gofmt -l *.go | read nothing
go vet
- go test -v ./...
+
+lint:
go get github.com/golang/lint/golint
golint *.go
+
+test: sysfs/fixtures/.unpacked
+ go test -v ./...
+
+sysfs/fixtures/.unpacked: sysfs/fixtures.ttar
+ ./ttar -C sysfs -x -f sysfs/fixtures.ttar
+ touch $@
+
+.PHONY: fmt lint test ci
diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go
index e7012f7..696d114 100644
--- a/vendor/github.com/prometheus/procfs/ipvs.go
+++ b/vendor/github.com/prometheus/procfs/ipvs.go
@@ -33,6 +33,8 @@ type IPVSBackendStatus struct {
LocalAddress net.IP
// The local (virtual) port.
LocalPort uint16
+ // The local firewall mark
+ LocalMark string
// The transport protocol (TCP, UDP).
Proto string
// The remote (real) IP address.
@@ -142,6 +144,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
status []IPVSBackendStatus
scanner = bufio.NewScanner(file)
proto string
+ localMark string
localAddress net.IP
localPort uint16
err error
@@ -160,10 +163,19 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
continue
}
proto = fields[0]
+ localMark = ""
localAddress, localPort, err = parseIPPort(fields[1])
if err != nil {
return nil, err
}
+ case fields[0] == "FWM":
+ if len(fields) < 2 {
+ continue
+ }
+ proto = fields[0]
+ localMark = fields[1]
+ localAddress = nil
+ localPort = 0
case fields[0] == "->":
if len(fields) < 6 {
continue
@@ -187,6 +199,7 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
status = append(status, IPVSBackendStatus{
LocalAddress: localAddress,
LocalPort: localPort,
+ LocalMark: localMark,
RemoteAddress: remoteAddress,
RemotePort: remotePort,
Proto: proto,
@@ -200,22 +213,31 @@ func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) {
}
func parseIPPort(s string) (net.IP, uint16, error) {
- tmp := strings.SplitN(s, ":", 2)
-
- if len(tmp) != 2 {
- return nil, 0, fmt.Errorf("invalid IP:Port: %s", s)
- }
+ var (
+ ip net.IP
+ err error
+ )
- if len(tmp[0]) != 8 && len(tmp[0]) != 32 {
- return nil, 0, fmt.Errorf("invalid IP: %s", tmp[0])
+ switch len(s) {
+ case 13:
+ ip, err = hex.DecodeString(s[0:8])
+ if err != nil {
+ return nil, 0, err
+ }
+ case 46:
+ ip = net.ParseIP(s[1:40])
+ if ip == nil {
+ return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
+ }
+ default:
+ return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
}
- ip, err := hex.DecodeString(tmp[0])
- if err != nil {
- return nil, 0, err
+ portString := s[len(s)-4:]
+ if len(portString) != 4 {
+ return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
}
-
- port, err := strconv.ParseUint(tmp[1], 16, 16)
+ port, err := strconv.ParseUint(portString, 16, 16)
if err != nil {
return nil, 0, err
}
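
For orientation, a standalone sketch of the IPv4 branch above, using only the standard library: /proc/net/ip_vs encodes an endpoint as eight hex digits for the address plus four for the port. The address shown is illustrative.

func decodeIPVSEndpoint() {
	s := "C0A80001:0050" // 192.168.0.1:80, the 13-character IPv4 form
	ipBytes, err := hex.DecodeString(s[0:8])
	if err != nil {
		log.Fatal(err)
	}
	port, err := strconv.ParseUint(s[len(s)-4:], 16, 16)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%v:%d\n", net.IP(ipBytes), port) // 192.168.0.1:80
}
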
diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go
index fe8f1f6..6b2b0ba 100644
--- a/vendor/github.com/prometheus/procfs/mountstats.go
+++ b/vendor/github.com/prometheus/procfs/mountstats.go
@@ -523,15 +523,19 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
}
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
- // in a v1.0 response
- ns := make([]uint64, 0, fieldTransport11Len)
- for _, s := range ss {
+ // in a v1.0 response.
+ //
+ // Note: slice length must be set to length of v1.1 stats to avoid a panic when
+ // only v1.0 stats are present.
+ // See: https://github.com/prometheus/node_exporter/issues/571.
+ ns := make([]uint64, fieldTransport11Len)
+ for i, s := range ss {
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return nil, err
}
- ns = append(ns, n)
+ ns[i] = n
}
return &NFSTransportStats{
diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go
index 2df997c..b684a5b 100644
--- a/vendor/github.com/prometheus/procfs/proc_limits.go
+++ b/vendor/github.com/prometheus/procfs/proc_limits.go
@@ -13,46 +13,46 @@ import (
// http://man7.org/linux/man-pages/man2/getrlimit.2.html.
type ProcLimits struct {
// CPU time limit in seconds.
- CPUTime int
+ CPUTime int64
// Maximum size of files that the process may create.
- FileSize int
+ FileSize int64
// Maximum size of the process's data segment (initialized data,
// uninitialized data, and heap).
- DataSize int
+ DataSize int64
// Maximum size of the process stack in bytes.
- StackSize int
+ StackSize int64
// Maximum size of a core file.
- CoreFileSize int
+ CoreFileSize int64
// Limit of the process's resident set in pages.
- ResidentSet int
+ ResidentSet int64
// Maximum number of processes that can be created for the real user ID of
// the calling process.
- Processes int
+ Processes int64
// Value one greater than the maximum file descriptor number that can be
// opened by this process.
- OpenFiles int
+ OpenFiles int64
// Maximum number of bytes of memory that may be locked into RAM.
- LockedMemory int
+ LockedMemory int64
// Maximum size of the process's virtual memory address space in bytes.
- AddressSpace int
+ AddressSpace int64
// Limit on the combined number of flock(2) locks and fcntl(2) leases that
// this process may establish.
- FileLocks int
+ FileLocks int64
// Limit of signals that may be queued for the real user ID of the calling
// process.
- PendingSignals int
+ PendingSignals int64
// Limit on the number of bytes that can be allocated for POSIX message
// queues for the real user ID of the calling process.
- MsqqueueSize int
+ MsqqueueSize int64
// Limit of the nice priority set using setpriority(2) or nice(2).
- NicePriority int
+ NicePriority int64
// Limit of the real-time priority set using sched_setscheduler(2) or
// sched_setparam(2).
- RealtimePriority int
+ RealtimePriority int64
// Limit (in microseconds) on the amount of CPU time that a process
// scheduled under a real-time scheduling policy may consume without making
// a blocking system call.
- RealtimeTimeout int
+ RealtimeTimeout int64
}
const (
@@ -125,13 +125,13 @@ func (p Proc) NewLimits() (ProcLimits, error) {
return l, s.Err()
}
-func parseInt(s string) (int, error) {
+func parseInt(s string) (int64, error) {
if s == limitsUnlimited {
return -1, nil
}
- i, err := strconv.ParseInt(s, 10, 32)
+ i, err := strconv.ParseInt(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("couldn't parse value %s: %s", s, err)
}
- return int(i), nil
+ return i, nil
}
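
A short usage sketch under the usual procfs entry points (Self and NewLimits), assuming the fmt and log imports; the widened fields still report -1 for unlimited values:

func printOpenFilesLimit() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	limits, err := p.NewLimits()
	if err != nil {
		log.Fatal(err)
	}
	// OpenFiles is now int64; -1 still means "unlimited".
	fmt.Println(limits.OpenFiles)
}
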
diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go
index 1ca217e..701f4df 100644
--- a/vendor/github.com/prometheus/procfs/stat.go
+++ b/vendor/github.com/prometheus/procfs/stat.go
@@ -3,15 +3,66 @@ package procfs
import (
"bufio"
"fmt"
+ "io"
"os"
"strconv"
"strings"
)
+// CPUStat shows how much time the CPU has spent in various stages.
+type CPUStat struct {
+ User float64
+ Nice float64
+ System float64
+ Idle float64
+ Iowait float64
+ IRQ float64
+ SoftIRQ float64
+ Steal float64
+ Guest float64
+ GuestNice float64
+}
+
+// SoftIRQStat represent the softirq statistics as exported in the procfs stat file.
+// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html
+// It is possible to get per-cpu stats by reading /proc/softirqs
+type SoftIRQStat struct {
+ Hi uint64
+ Timer uint64
+ NetTx uint64
+ NetRx uint64
+ Block uint64
+ BlockIoPoll uint64
+ Tasklet uint64
+ Sched uint64
+ Hrtimer uint64
+ Rcu uint64
+}
+
// Stat represents kernel/system statistics.
type Stat struct {
// Boot time in seconds since the Epoch.
- BootTime int64
+ BootTime uint64
+ // Summed up cpu statistics.
+ CPUTotal CPUStat
+ // Per-CPU statistics.
+ CPU []CPUStat
+ // Number of times interrupts were handled, which contains numbered and unnumbered IRQs.
+ IRQTotal uint64
+ // Number of times a numbered IRQ was triggered.
+ IRQ []uint64
+ // Number of times a context switch happened.
+ ContextSwitches uint64
+ // Number of times a process was created.
+ ProcessCreated uint64
+ // Number of processes currently running.
+ ProcessesRunning uint64
+ // Number of processes currently blocked (waiting for IO).
+ ProcessesBlocked uint64
+ // Number of times a softirq was scheduled.
+ SoftIRQTotal uint64
+ // Detailed softirq statistics.
+ SoftIRQ SoftIRQStat
}
// NewStat returns kernel/system statistics read from /proc/stat.
@@ -24,33 +75,145 @@ func NewStat() (Stat, error) {
return fs.NewStat()
}
+// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum).
+func parseCPUStat(line string) (CPUStat, int64, error) {
+ cpuStat := CPUStat{}
+ var cpu string
+
+ count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f",
+ &cpu,
+ &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle,
+ &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal,
+ &cpuStat.Guest, &cpuStat.GuestNice)
+
+ if err != nil && err != io.EOF {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err)
+ }
+ if count == 0 {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line)
+ }
+
+ cpuStat.User /= userHZ
+ cpuStat.Nice /= userHZ
+ cpuStat.System /= userHZ
+ cpuStat.Idle /= userHZ
+ cpuStat.Iowait /= userHZ
+ cpuStat.IRQ /= userHZ
+ cpuStat.SoftIRQ /= userHZ
+ cpuStat.Steal /= userHZ
+ cpuStat.Guest /= userHZ
+ cpuStat.GuestNice /= userHZ
+
+ if cpu == "cpu" {
+ return cpuStat, -1, nil
+ }
+
+ cpuID, err := strconv.ParseInt(cpu[3:], 10, 64)
+ if err != nil {
+ return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err)
+ }
+
+ return cpuStat, cpuID, nil
+}
+
+// Parse a softirq line.
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) {
+ softIRQStat := SoftIRQStat{}
+ var total uint64
+ var prefix string
+
+ _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d",
+ &prefix, &total,
+ &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx,
+ &softIRQStat.Block, &softIRQStat.BlockIoPoll,
+ &softIRQStat.Tasklet, &softIRQStat.Sched,
+ &softIRQStat.Hrtimer, &softIRQStat.Rcu)
+
+ if err != nil {
+ return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err)
+ }
+
+ return softIRQStat, total, nil
+}
+
// NewStat returns information about current kernel/system statistics.
func (fs FS) NewStat() (Stat, error) {
+ // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+
f, err := os.Open(fs.Path("stat"))
if err != nil {
return Stat{}, err
}
defer f.Close()
- s := bufio.NewScanner(f)
- for s.Scan() {
- line := s.Text()
- if !strings.HasPrefix(line, "btime") {
+ stat := Stat{}
+
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Fields(scanner.Text())
+ // require at least <key> <value>
+ if len(parts) < 2 {
continue
}
- fields := strings.Fields(line)
- if len(fields) != 2 {
- return Stat{}, fmt.Errorf("couldn't parse %s line %s", f.Name(), line)
- }
- i, err := strconv.ParseInt(fields[1], 10, 32)
- if err != nil {
- return Stat{}, fmt.Errorf("couldn't parse %s: %s", fields[1], err)
+ switch {
+ case parts[0] == "btime":
+ if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err)
+ }
+ case parts[0] == "intr":
+ if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err)
+ }
+ numberedIRQs := parts[2:]
+ stat.IRQ = make([]uint64, len(numberedIRQs))
+ for i, count := range numberedIRQs {
+ if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err)
+ }
+ }
+ case parts[0] == "ctxt":
+ if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err)
+ }
+ case parts[0] == "processes":
+ if stat.ProcessCreated, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err)
+ }
+ case parts[0] == "procs_running":
+ if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err)
+ }
+ case parts[0] == "procs_blocked":
+ if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil {
+ return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err)
+ }
+ case parts[0] == "softirq":
+ softIRQStats, total, err := parseSoftIRQStat(line)
+ if err != nil {
+ return Stat{}, err
+ }
+ stat.SoftIRQTotal = total
+ stat.SoftIRQ = softIRQStats
+ case strings.HasPrefix(parts[0], "cpu"):
+ cpuStat, cpuID, err := parseCPUStat(line)
+ if err != nil {
+ return Stat{}, err
+ }
+ if cpuID == -1 {
+ stat.CPUTotal = cpuStat
+ } else {
+ for int64(len(stat.CPU)) <= cpuID {
+ stat.CPU = append(stat.CPU, CPUStat{})
+ }
+ stat.CPU[cpuID] = cpuStat
+ }
}
- return Stat{BootTime: i}, nil
}
- if err := s.Err(); err != nil {
+
+ if err := scanner.Err(); err != nil {
return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err)
}
- return Stat{}, fmt.Errorf("couldn't parse %s, missing btime", f.Name())
+ return stat, nil
}
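
A minimal sketch of reading the expanded Stat struct through the usual procfs entry points, assuming the fmt and log imports:

func printStat() {
	fs, err := procfs.NewFS(procfs.DefaultMountPoint)
	if err != nil {
		log.Fatal(err)
	}
	stat, err := fs.NewStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(stat.BootTime)      // boot time in seconds, now uint64
	fmt.Println(stat.CPUTotal.User) // user-mode seconds summed over all CPUs
	fmt.Println(len(stat.CPU))      // number of per-CPU entries
	fmt.Println(stat.SoftIRQTotal)  // total softirqs handled
}
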
diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar
new file mode 100755
index 0000000..8227a4a
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/ttar
@@ -0,0 +1,264 @@
+#!/usr/bin/env bash
+# Purpose: plain text tar format
+# Limitations: - only suitable for text files, directories, and symlinks
+# - stores only filename, content, and mode
+# - not designed for untrusted input
+
+# Note: must work with bash version 3.2 (macOS)
+
+set -o errexit -o nounset
+
+# Sanitize environment (for instance, standard sorting of glob matches)
+export LC_ALL=C
+
+path=""
+CMD=""
+
+function usage {
+ bname=$(basename "$0")
+ cat << USAGE
+Usage: $bname [-C <DIR>] -c -f <ARCHIVE> <FILE...> (create archive)
+ $bname -t -f <ARCHIVE> (list archive contents)
+ $bname [-C <DIR>] -x -f <ARCHIVE> (extract archive)
+
+Options:
+ -C <DIR> (change directory)
+
+Example: Change to sysfs directory, create ttar file from fixtures directory
+ $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/
+USAGE
+exit "$1"
+}
+
+function vecho {
+ if [ "${VERBOSE:-}" == "yes" ]; then
+ echo >&7 "$@"
+ fi
+}
+
+function set_cmd {
+ if [ -n "$CMD" ]; then
+ echo "ERROR: more than one command given"
+ echo
+ usage 2
+ fi
+ CMD=$1
+}
+
+while getopts :cf:htxvC: opt; do
+ case $opt in
+ c)
+ set_cmd "create"
+ ;;
+ f)
+ ARCHIVE=$OPTARG
+ ;;
+ h)
+ usage 0
+ ;;
+ t)
+ set_cmd "list"
+ ;;
+ x)
+ set_cmd "extract"
+ ;;
+ v)
+ VERBOSE=yes
+ exec 7>&1
+ ;;
+ C)
+ CDIR=$OPTARG
+ ;;
+ *)
+ echo >&2 "ERROR: invalid option -$OPTARG"
+ echo
+ usage 1
+ ;;
+ esac
+done
+
+# Remove processed options from arguments
+shift $(( OPTIND - 1 ));
+
+if [ "${CMD:-}" == "" ]; then
+ echo >&2 "ERROR: no command given"
+ echo
+ usage 1
+elif [ "${ARCHIVE:-}" == "" ]; then
+ echo >&2 "ERROR: no archive name given"
+ echo
+ usage 1
+fi
+
+function list {
+ local path=""
+ local size=0
+ local line_no=0
+ local ttar_file=$1
+ if [ -n "${2:-}" ]; then
+ echo >&2 "ERROR: too many arguments."
+ echo
+ usage 1
+ fi
+ if [ ! -e "$ttar_file" ]; then
+ echo >&2 "ERROR: file not found ($ttar_file)"
+ echo
+ usage 1
+ fi
+ while read -r line; do
+ line_no=$(( line_no + 1 ))
+ if [ $size -gt 0 ]; then
+ size=$(( size - 1 ))
+ continue
+ fi
+ if [[ $line =~ ^Path:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+ size=${BASH_REMATCH[1]}
+ echo "$path"
+ elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ echo "$path/"
+ elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+ echo "$path -> ${BASH_REMATCH[1]}"
+ fi
+ done < "$ttar_file"
+}
+
+function extract {
+ local path=""
+ local size=0
+ local line_no=0
+ local ttar_file=$1
+ if [ -n "${2:-}" ]; then
+ echo >&2 "ERROR: too many arguments."
+ echo
+ usage 1
+ fi
+ if [ ! -e "$ttar_file" ]; then
+ echo >&2 "ERROR: file not found ($ttar_file)"
+ echo
+ usage 1
+ fi
+ while IFS= read -r line; do
+ line_no=$(( line_no + 1 ))
+ if [ "$size" -gt 0 ]; then
+ echo "$line" >> "$path"
+ size=$(( size - 1 ))
+ continue
+ fi
+ if [[ $line =~ ^Path:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ if [ -e "$path" ] || [ -L "$path" ]; then
+ rm "$path"
+ fi
+ elif [[ $line =~ ^Lines:\ (.*)$ ]]; then
+ size=${BASH_REMATCH[1]}
+ # Create file even if it is zero-length.
+ touch "$path"
+ vecho " $path"
+ elif [[ $line =~ ^Mode:\ (.*)$ ]]; then
+ mode=${BASH_REMATCH[1]}
+ chmod "$mode" "$path"
+ vecho "$mode"
+ elif [[ $line =~ ^Directory:\ (.*)$ ]]; then
+ path=${BASH_REMATCH[1]}
+ mkdir -p "$path"
+ vecho " $path/"
+ elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then
+ ln -s "${BASH_REMATCH[1]}" "$path"
+ vecho " $path -> ${BASH_REMATCH[1]}"
+ elif [[ $line =~ ^# ]]; then
+ # Ignore comments between files
+ continue
+ else
+ echo >&2 "ERROR: Unknown keyword on line $line_no: $line"
+ exit 1
+ fi
+ done < "$ttar_file"
+}
+
+function div {
+ echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \
+ "- - - - - -"
+}
+
+function get_mode {
+ local mfile=$1
+ if [ -z "${STAT_OPTION:-}" ]; then
+ if stat -c '%a' "$mfile" >/dev/null 2>&1; then
+ STAT_OPTION='-c'
+ STAT_FORMAT='%a'
+ else
+ STAT_OPTION='-f'
+ STAT_FORMAT='%A'
+ fi
+ fi
+ stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile"
+}
+
+function _create {
+ shopt -s nullglob
+ local mode
+ while (( "$#" )); do
+ file=$1
+ if [ -L "$file" ]; then
+ echo "Path: $file"
+ symlinkTo=$(readlink "$file")
+ echo "SymlinkTo: $symlinkTo"
+ vecho " $file -> $symlinkTo"
+ div
+ elif [ -d "$file" ]; then
+ # Strip trailing slash (if there is one)
+ file=${file%/}
+ echo "Directory: $file"
+ mode=$(get_mode "$file")
+ echo "Mode: $mode"
+ vecho "$mode $file/"
+ div
+ # Find all files and dirs, including hidden/dot files
+ for x in "$file/"{*,.[^.]*}; do
+ _create "$x"
+ done
+ elif [ -f "$file" ]; then
+ echo "Path: $file"
+ lines=$(wc -l "$file"|awk '{print $1}')
+ echo "Lines: $lines"
+ cat "$file"
+ mode=$(get_mode "$file")
+ echo "Mode: $mode"
+ vecho "$mode $file"
+ div
+ else
+ echo >&2 "ERROR: file not found ($file in $(pwd))"
+ exit 2
+ fi
+ shift
+ done
+}
+
+function create {
+ ttar_file=$1
+ shift
+ if [ -z "${1:-}" ]; then
+ echo >&2 "ERROR: missing arguments."
+ echo
+ usage 1
+ fi
+ if [ -e "$ttar_file" ]; then
+ rm "$ttar_file"
+ fi
+ exec > "$ttar_file"
+ _create "$@"
+}
+
+if [ -n "${CDIR:-}" ]; then
+ if [[ "$ARCHIVE" != /* ]]; then
+ # Relative path: preserve the archive's location before changing
+ # directory
+ ARCHIVE="$(pwd)/$ARCHIVE"
+ fi
+ cd "$CDIR"
+fi
+
+"$CMD" "$ARCHIVE" "$@"
diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go
new file mode 100644
index 0000000..ffe9df5
--- /dev/null
+++ b/vendor/github.com/prometheus/procfs/xfrm.go
@@ -0,0 +1,187 @@
+// Copyright 2017 Prometheus Team
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+ "bufio"
+ "fmt"
+ "os"
+ "strconv"
+ "strings"
+)
+
+// XfrmStat models the contents of /proc/net/xfrm_stat.
+type XfrmStat struct {
+ // All errors which are not matched by other
+ XfrmInError int
+ // No buffer is left
+ XfrmInBufferError int
+ // Header Error
+ XfrmInHdrError int
+ // No state found
+ // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong
+ XfrmInNoStates int
+ // Transformation protocol specific error
+ // e.g. SA Key is wrong
+ XfrmInStateProtoError int
+ // Transformation mode specific error
+ XfrmInStateModeError int
+ // Sequence error
+ // e.g. sequence number is out of window
+ XfrmInStateSeqError int
+ // State is expired
+ XfrmInStateExpired int
+ // State has mismatch option
+ // e.g. UDP encapsulation type is mismatched
+ XfrmInStateMismatch int
+ // State is invalid
+ XfrmInStateInvalid int
+ // No matching template for states
+ // e.g. Inbound SAs are correct but SP rule is wrong
+ XfrmInTmplMismatch int
+ // No policy is found for states
+ // e.g. Inbound SAs are correct but no SP is found
+ XfrmInNoPols int
+ // Policy discards
+ XfrmInPolBlock int
+ // Policy error
+ XfrmInPolError int
+ // All errors which are not matched by others
+ XfrmOutError int
+ // Bundle generation error
+ XfrmOutBundleGenError int
+ // Bundle check error
+ XfrmOutBundleCheckError int
+ // No state was found
+ XfrmOutNoStates int
+ // Transformation protocol specific error
+ XfrmOutStateProtoError int
+ // Transportation mode specific error
+ XfrmOutStateModeError int
+ // Sequence error
+ // i.e sequence number overflow
+ XfrmOutStateSeqError int
+ // State is expired
+ XfrmOutStateExpired int
+	// Policy discards
+ XfrmOutPolBlock int
+ // Policy is dead
+ XfrmOutPolDead int
+ // Policy Error
+ XfrmOutPolError int
+ XfrmFwdHdrError int
+ XfrmOutStateInvalid int
+ XfrmAcquireError int
+}
+
+// NewXfrmStat reads the xfrm_stat statistics.
+func NewXfrmStat() (XfrmStat, error) {
+ fs, err := NewFS(DefaultMountPoint)
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ return fs.NewXfrmStat()
+}
+
+// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem.
+func (fs FS) NewXfrmStat() (XfrmStat, error) {
+ file, err := os.Open(fs.Path("net/xfrm_stat"))
+ if err != nil {
+ return XfrmStat{}, err
+ }
+ defer file.Close()
+
+ var (
+ x = XfrmStat{}
+ s = bufio.NewScanner(file)
+ )
+
+ for s.Scan() {
+ fields := strings.Fields(s.Text())
+
+ if len(fields) != 2 {
+ return XfrmStat{}, fmt.Errorf(
+				"couldn't parse %s line %s", file.Name(), s.Text())
+ }
+
+ name := fields[0]
+ value, err := strconv.Atoi(fields[1])
+ if err != nil {
+ return XfrmStat{}, err
+ }
+
+ switch name {
+ case "XfrmInError":
+ x.XfrmInError = value
+ case "XfrmInBufferError":
+ x.XfrmInBufferError = value
+ case "XfrmInHdrError":
+ x.XfrmInHdrError = value
+ case "XfrmInNoStates":
+ x.XfrmInNoStates = value
+ case "XfrmInStateProtoError":
+ x.XfrmInStateProtoError = value
+ case "XfrmInStateModeError":
+ x.XfrmInStateModeError = value
+ case "XfrmInStateSeqError":
+ x.XfrmInStateSeqError = value
+ case "XfrmInStateExpired":
+ x.XfrmInStateExpired = value
+ case "XfrmInStateInvalid":
+ x.XfrmInStateInvalid = value
+ case "XfrmInTmplMismatch":
+ x.XfrmInTmplMismatch = value
+ case "XfrmInNoPols":
+ x.XfrmInNoPols = value
+ case "XfrmInPolBlock":
+ x.XfrmInPolBlock = value
+ case "XfrmInPolError":
+ x.XfrmInPolError = value
+ case "XfrmOutError":
+ x.XfrmOutError = value
+ case "XfrmInStateMismatch":
+ x.XfrmInStateMismatch = value
+ case "XfrmOutBundleGenError":
+ x.XfrmOutBundleGenError = value
+ case "XfrmOutBundleCheckError":
+ x.XfrmOutBundleCheckError = value
+ case "XfrmOutNoStates":
+ x.XfrmOutNoStates = value
+ case "XfrmOutStateProtoError":
+ x.XfrmOutStateProtoError = value
+ case "XfrmOutStateModeError":
+ x.XfrmOutStateModeError = value
+ case "XfrmOutStateSeqError":
+ x.XfrmOutStateSeqError = value
+ case "XfrmOutStateExpired":
+ x.XfrmOutStateExpired = value
+ case "XfrmOutPolBlock":
+ x.XfrmOutPolBlock = value
+ case "XfrmOutPolDead":
+ x.XfrmOutPolDead = value
+ case "XfrmOutPolError":
+ x.XfrmOutPolError = value
+ case "XfrmFwdHdrError":
+ x.XfrmFwdHdrError = value
+ case "XfrmOutStateInvalid":
+ x.XfrmOutStateInvalid = value
+ case "XfrmAcquireError":
+ x.XfrmAcquireError = value
+ }
+
+ }
+
+ return x, s.Err()
+}
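
Usage of the new file is straightforward; a sketch assuming the fmt and log imports:

func printXfrmErrors() {
	stat, err := procfs.NewXfrmStat()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("in errors:", stat.XfrmInError, "out errors:", stat.XfrmOutError)
}
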
diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go
index d1285fa..c8f6279 100644
--- a/vendor/github.com/prometheus/procfs/xfs/parse.go
+++ b/vendor/github.com/prometheus/procfs/xfs/parse.go
@@ -17,7 +17,6 @@ import (
"bufio"
"fmt"
"io"
- "log"
"strconv"
"strings"
)
@@ -273,7 +272,6 @@ func vnodeStats(us []uint32) (VnodeStats, error) {
// stats versions. Therefore, 7 or 8 elements may appear in
// this slice.
l := len(us)
- log.Println(l)
if l != 7 && l != 8 {
return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l)
}
diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go
index ed77d90..d86794b 100644
--- a/vendor/github.com/prometheus/procfs/xfs/xfs.go
+++ b/vendor/github.com/prometheus/procfs/xfs/xfs.go
@@ -22,6 +22,11 @@ package xfs
// kernel source. Most counters are uint32s (same data types used in
// xfs_stats.h), but some of the "extended precision stats" are uint64s.
type Stats struct {
+ // The name of the filesystem used to source these statistics.
+ // If empty, this indicates aggregated statistics for all XFS
+ // filesystems on the host.
+ Name string
+
ExtentAllocation ExtentAllocationStats
AllocationBTree BTreeStats
BlockMapping BlockMappingStats
diff --git a/vendor/github.com/sethgrid/pester/README.md b/vendor/github.com/sethgrid/pester/README.md
index e41f4d6..ac1d6d9 100644
--- a/vendor/github.com/sethgrid/pester/README.md
+++ b/vendor/github.com/sethgrid/pester/README.md
@@ -35,7 +35,7 @@ client.Backoff = func(retry int) time.Duration {
For a complete and working example, see the sample directory.
`pester` allows you to use a constructor to control:
- backoff strategy
-- reties
+- retries
- concurrency
- keeping a log for debugging
```go
diff --git a/vendor/github.com/sethgrid/pester/main.go b/vendor/github.com/sethgrid/pester/main.go
index 4a69791..55db6fa 100644
--- a/vendor/github.com/sethgrid/pester/main.go
+++ b/vendor/github.com/sethgrid/pester/main.go
@@ -109,8 +109,8 @@ func NewExtendedClient(hc *http.Client) *Client {
return c
}
-// PrintErrStrategy is used to log attempts as they happen.
-// You know, more visible
+// LogHook is used to log attempts as they happen. This function is never called,
+// however, if KeepLog is set to true.
type LogHook func(e ErrEntry)
// BackoffStrategy is used to determine how long a retry request should wait until attempted
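
A hedged sketch of the hook, assuming pester's http.Client-style surface (New, Get, KeepLog, a LogHook field): the hook only fires when KeepLog is false.

func fetchWithRetryLogging() {
	client := pester.New()
	client.KeepLog = false // with KeepLog set to true, LogHook is never called
	client.LogHook = func(e pester.ErrEntry) {
		log.Printf("pester attempt failed: %+v", e)
	}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}
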
diff --git a/vendor/github.com/soheilhy/cmux/README.md b/vendor/github.com/soheilhy/cmux/README.md
index b1c75a7..70306e6 100644
--- a/vendor/github.com/soheilhy/cmux/README.md
+++ b/vendor/github.com/soheilhy/cmux/README.md
@@ -32,7 +32,7 @@ httpS := &http.Server{
}
trpcS := rpc.NewServer()
-s.Register(&ExampleRPCRcvr{})
+trpcS.Register(&ExampleRPCRcvr{})
// Use the muxed listeners for your servers.
go grpcS.Serve(grpcL)
diff --git a/vendor/github.com/soheilhy/cmux/matchers.go b/vendor/github.com/soheilhy/cmux/matchers.go
index 485ede8..652fd86 100644
--- a/vendor/github.com/soheilhy/cmux/matchers.go
+++ b/vendor/github.com/soheilhy/cmux/matchers.go
@@ -16,6 +16,7 @@ package cmux
import (
"bufio"
+ "crypto/tls"
"io"
"io/ioutil"
"net/http"
@@ -37,6 +38,11 @@ func PrefixMatcher(strs ...string) Matcher {
return pt.matchPrefix
}
+func prefixByteMatcher(list ...[]byte) Matcher {
+ pt := newPatriciaTree(list...)
+ return pt.matchPrefix
+}
+
var defaultHTTPMethods = []string{
"OPTIONS",
"GET",
@@ -57,6 +63,27 @@ func HTTP1Fast(extMethods ...string) Matcher {
return PrefixMatcher(append(defaultHTTPMethods, extMethods...)...)
}
+// TLS matches HTTPS requests.
+//
+// By default, any TLS handshake packet is matched. An optional whitelist
+// of versions can be passed in to restrict the matcher, for example:
+// TLS(tls.VersionTLS11, tls.VersionTLS12)
+func TLS(versions ...int) Matcher {
+ if len(versions) == 0 {
+ versions = []int{
+ tls.VersionSSL30,
+ tls.VersionTLS10,
+ tls.VersionTLS11,
+ tls.VersionTLS12,
+ }
+ }
+ prefixes := [][]byte{}
+ for _, v := range versions {
+ prefixes = append(prefixes, []byte{22, byte(v >> 8 & 0xff), byte(v & 0xff)})
+ }
+ return prefixByteMatcher(prefixes...)
+}
+
const maxHTTPRead = 4096
// HTTP1 parses the first line or up to 4096 bytes of the request to see if
@@ -100,15 +127,41 @@ func HTTP2() Matcher {
// request of an HTTP 1 connection.
func HTTP1HeaderField(name, value string) Matcher {
return func(r io.Reader) bool {
- return matchHTTP1Field(r, name, value)
+ return matchHTTP1Field(r, name, func(gotValue string) bool {
+ return gotValue == value
+ })
+ }
+}
+
+// HTTP1HeaderFieldPrefix returns a matcher matching the header fields of the
+// first request of an HTTP 1 connection. If the header with key name has a
+// value prefixed with valuePrefix, this will match.
+func HTTP1HeaderFieldPrefix(name, valuePrefix string) Matcher {
+ return func(r io.Reader) bool {
+ return matchHTTP1Field(r, name, func(gotValue string) bool {
+ return strings.HasPrefix(gotValue, valuePrefix)
+ })
}
}
-// HTTP2HeaderField resturns a matcher matching the header fields of the first
+// HTTP2HeaderField returns a matcher matching the header fields of the first
// headers frame.
func HTTP2HeaderField(name, value string) Matcher {
return func(r io.Reader) bool {
- return matchHTTP2Field(ioutil.Discard, r, name, value)
+ return matchHTTP2Field(ioutil.Discard, r, name, func(gotValue string) bool {
+ return gotValue == value
+ })
+ }
+}
+
+// HTTP2HeaderFieldPrefix returns a matcher matching the header fields of the
+// first headers frame. If the header with key name has a value prefixed with
+// valuePrefix, this will match.
+func HTTP2HeaderFieldPrefix(name, valuePrefix string) Matcher {
+ return func(r io.Reader) bool {
+ return matchHTTP2Field(ioutil.Discard, r, name, func(gotValue string) bool {
+ return strings.HasPrefix(gotValue, valuePrefix)
+ })
}
}
@@ -117,29 +170,54 @@ func HTTP2HeaderField(name, value string) Matcher {
// does not block on receiving a SETTING frame.
func HTTP2MatchHeaderFieldSendSettings(name, value string) MatchWriter {
return func(w io.Writer, r io.Reader) bool {
- return matchHTTP2Field(w, r, name, value)
+ return matchHTTP2Field(w, r, name, func(gotValue string) bool {
+ return gotValue == value
+ })
+ }
+}
+
+// HTTP2MatchHeaderFieldPrefixSendSettings matches the header field prefix
+// and writes the settings to the server. Prefer HTTP2HeaderFieldPrefix over
+// this one, if the client does not block on receiving a SETTING frame.
+func HTTP2MatchHeaderFieldPrefixSendSettings(name, valuePrefix string) MatchWriter {
+ return func(w io.Writer, r io.Reader) bool {
+ return matchHTTP2Field(w, r, name, func(gotValue string) bool {
+ return strings.HasPrefix(gotValue, valuePrefix)
+ })
}
}
func hasHTTP2Preface(r io.Reader) bool {
var b [len(http2.ClientPreface)]byte
- if _, err := io.ReadFull(r, b[:]); err != nil {
- return false
- }
+ last := 0
+
+ for {
+ n, err := r.Read(b[last:])
+ if err != nil {
+ return false
+ }
- return string(b[:]) == http2.ClientPreface
+ last += n
+ eq := string(b[:last]) == http2.ClientPreface[:last]
+ if last == len(http2.ClientPreface) {
+ return eq
+ }
+ if !eq {
+ return false
+ }
+ }
}
-func matchHTTP1Field(r io.Reader, name, value string) (matched bool) {
+func matchHTTP1Field(r io.Reader, name string, matches func(string) bool) (matched bool) {
req, err := http.ReadRequest(bufio.NewReader(r))
if err != nil {
return false
}
- return req.Header.Get(name) == value
+ return matches(req.Header.Get(name))
}
-func matchHTTP2Field(w io.Writer, r io.Reader, name, value string) (matched bool) {
+func matchHTTP2Field(w io.Writer, r io.Reader, name string, matches func(string) bool) (matched bool) {
if !hasHTTP2Preface(r) {
return false
}
@@ -149,7 +227,7 @@ func matchHTTP2Field(w io.Writer, r io.Reader, name, value string) (matched bool
hdec := hpack.NewDecoder(uint32(4<<10), func(hf hpack.HeaderField) {
if hf.Name == name {
done = true
- if hf.Value == value {
+ if matches(hf.Value) {
matched = true
}
}
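The hunk above adds a `TLS` matcher and prefix variants of the header-field matchers. A sketch of wiring them together, assuming the usual `cmux.New`/`Match`/`Serve` flow from the cmux README:

```go
// Sketch: multiplex gRPC, TLS and plain HTTP/1 traffic on one listener.
// Only TLS() and HTTP2HeaderFieldPrefix() come from the change above; the
// rest follows the cmux README.
package main

import (
	"fmt"
	"net"
	"net/http"

	"github.com/soheilhy/cmux"
)

func main() {
	l, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		panic(err)
	}
	m := cmux.New(l)

	// HTTP/2 connections whose content-type starts with "application/grpc".
	grpcL := m.Match(cmux.HTTP2HeaderFieldPrefix("content-type", "application/grpc"))
	// Any TLS handshake; restrict versions with e.g. cmux.TLS(tls.VersionTLS12).
	tlsL := m.Match(cmux.TLS())
	// Plain HTTP/1 requests.
	httpL := m.Match(cmux.HTTP1Fast())

	// Hand grpcL and tlsL to their respective servers in real code.
	_ = grpcL
	_ = tlsL

	go http.Serve(httpL, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello over plain HTTP/1")
	}))

	if err := m.Serve(); err != nil {
		panic(err)
	}
}
```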
diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md
index d9e3327..0c9b04b 100644
--- a/vendor/github.com/spf13/afero/README.md
+++ b/vendor/github.com/spf13/afero/README.md
@@ -61,11 +61,11 @@ import "github.com/spf13/afero"
First define a package variable and set it to a pointer to a filesystem.
```go
-var AppFs afero.Fs = afero.NewMemMapFs()
+var AppFs = afero.NewMemMapFs()
or
-var AppFs afero.Fs = afero.NewOsFs()
+var AppFs = afero.NewOsFs()
```
It is important to note that if you repeat the composite literal you
will be using a completely new and isolated filesystem. In the case of
@@ -81,7 +81,10 @@ So if my application before had:
```go
os.Open('/tmp/foo')
```
-We would replace it with a call to `AppFs.Open('/tmp/foo')`.
+We would replace it with:
+```go
+AppFs.Open('/tmp/foo')
+```
`AppFs` being the variable we defined above.
@@ -166,8 +169,8 @@ f, err := afero.TempFile(fs,"", "ioutil-test")
### Calling via Afero
```go
-fs := afero.NewMemMapFs
-afs := &Afero{Fs: fs}
+fs := afero.NewMemMapFs()
+afs := &afero.Afero{Fs: fs}
f, err := afs.TempFile("", "ioutil-test")
```
diff --git a/vendor/github.com/spf13/afero/appveyor.yml b/vendor/github.com/spf13/afero/appveyor.yml
index 006f315..a633ad5 100644
--- a/vendor/github.com/spf13/afero/appveyor.yml
+++ b/vendor/github.com/spf13/afero/appveyor.yml
@@ -12,4 +12,4 @@ build_script:
go build github.com/spf13/afero
test_script:
-- cmd: go test -v github.com/spf13/afero
+- cmd: go test -race -v github.com/spf13/afero/...
diff --git a/vendor/github.com/spf13/afero/cacheOnReadFs.go b/vendor/github.com/spf13/afero/cacheOnReadFs.go
index e54a4f8..b026e0d 100644
--- a/vendor/github.com/spf13/afero/cacheOnReadFs.go
+++ b/vendor/github.com/spf13/afero/cacheOnReadFs.go
@@ -64,15 +64,10 @@ func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileIn
return cacheHit, lfi, nil
}
- if err == syscall.ENOENT {
+ if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
}
- var ok bool
- if err, ok = err.(*os.PathError); ok {
- if err == os.ErrNotExist {
- return cacheMiss, nil, nil
- }
- }
+
return cacheMiss, nil, err
}
diff --git a/vendor/github.com/spf13/afero/match.go b/vendor/github.com/spf13/afero/match.go
new file mode 100644
index 0000000..08b3b7e
--- /dev/null
+++ b/vendor/github.com/spf13/afero/match.go
@@ -0,0 +1,110 @@
+// Copyright © 2014 Steve Francia <spf@spf13.com>.
+// Copyright 2009 The Go Authors. All rights reserved.
+
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package afero
+
+import (
+ "path/filepath"
+ "sort"
+ "strings"
+)
+
+// Glob returns the names of all files matching pattern or nil
+// if there is no matching file. The syntax of patterns is the same
+// as in Match. The pattern may describe hierarchical names such as
+// /usr/*/bin/ed (assuming the Separator is '/').
+//
+// Glob ignores file system errors such as I/O errors reading directories.
+// The only possible returned error is ErrBadPattern, when pattern
+// is malformed.
+//
+// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
+// built-ins from that package.
+func Glob(fs Fs, pattern string) (matches []string, err error) {
+ if !hasMeta(pattern) {
+ // afero does not support Lstat directly.
+ if _, err = lstatIfOs(fs, pattern); err != nil {
+ return nil, nil
+ }
+ return []string{pattern}, nil
+ }
+
+ dir, file := filepath.Split(pattern)
+ switch dir {
+ case "":
+ dir = "."
+ case string(filepath.Separator):
+ // nothing
+ default:
+ dir = dir[0 : len(dir)-1] // chop off trailing separator
+ }
+
+ if !hasMeta(dir) {
+ return glob(fs, dir, file, nil)
+ }
+
+ var m []string
+ m, err = Glob(fs, dir)
+ if err != nil {
+ return
+ }
+ for _, d := range m {
+ matches, err = glob(fs, d, file, matches)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// glob searches for files matching pattern in the directory dir
+// and appends them to matches. If the directory cannot be
+// opened, it returns the existing matches. New matches are
+// added in lexicographical order.
+func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
+ m = matches
+ fi, err := fs.Stat(dir)
+ if err != nil {
+ return
+ }
+ if !fi.IsDir() {
+ return
+ }
+ d, err := fs.Open(dir)
+ if err != nil {
+ return
+ }
+ defer d.Close()
+
+ names, _ := d.Readdirnames(-1)
+ sort.Strings(names)
+
+ for _, n := range names {
+ matched, err := filepath.Match(pattern, n)
+ if err != nil {
+ return m, err
+ }
+ if matched {
+ m = append(m, filepath.Join(dir, n))
+ }
+ }
+ return
+}
+
+// hasMeta reports whether path contains any of the magic characters
+// recognized by Match.
+func hasMeta(path string) bool {
+ // TODO(niemeyer): Should other magic characters be added here?
+ return strings.IndexAny(path, "*?[") >= 0
+}
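A short usage sketch for the new `Glob`, assuming afero's `WriteFile` helper for setup:

```go
// Sketch: exercising afero.Glob against an in-memory filesystem.
package main

import (
	"fmt"

	"github.com/spf13/afero"
)

func main() {
	fs := afero.NewMemMapFs()
	_ = afero.WriteFile(fs, "/logs/app.log", []byte("a"), 0644)
	_ = afero.WriteFile(fs, "/logs/db.log", []byte("b"), 0644)
	_ = afero.WriteFile(fs, "/logs/readme.txt", []byte("c"), 0644)

	matches, err := afero.Glob(fs, "/logs/*.log")
	if err != nil {
		panic(err)
	}
	fmt.Println(matches) // [/logs/app.log /logs/db.log]
}
```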
diff --git a/vendor/github.com/spf13/afero/mem/file.go b/vendor/github.com/spf13/afero/mem/file.go
index e41e012..5401a3b 100644
--- a/vendor/github.com/spf13/afero/mem/file.go
+++ b/vendor/github.com/spf13/afero/mem/file.go
@@ -74,14 +74,24 @@ func CreateDir(name string) *FileData {
}
func ChangeFileName(f *FileData, newname string) {
+ f.Lock()
f.name = newname
+ f.Unlock()
}
func SetMode(f *FileData, mode os.FileMode) {
+ f.Lock()
f.mode = mode
+ f.Unlock()
}
func SetModTime(f *FileData, mtime time.Time) {
+ f.Lock()
+ setModTime(f, mtime)
+ f.Unlock()
+}
+
+func setModTime(f *FileData, mtime time.Time) {
f.modtime = mtime
}
@@ -102,7 +112,7 @@ func (f *File) Close() error {
f.fileData.Lock()
f.closed = true
if !f.readOnly {
- SetModTime(f.fileData, time.Now())
+ setModTime(f.fileData, time.Now())
}
f.fileData.Unlock()
return nil
@@ -197,7 +207,7 @@ func (f *File) Truncate(size int64) error {
} else {
f.fileData.data = f.fileData.data[0:size]
}
- SetModTime(f.fileData, time.Now())
+ setModTime(f.fileData, time.Now())
return nil
}
@@ -236,7 +246,7 @@ func (f *File) Write(b []byte) (n int, err error) {
f.fileData.data = append(f.fileData.data[:cur], b...)
f.fileData.data = append(f.fileData.data, tail...)
}
- SetModTime(f.fileData, time.Now())
+ setModTime(f.fileData, time.Now())
atomic.StoreInt64(&f.at, int64(len(f.fileData.data)))
return
@@ -261,17 +271,33 @@ type FileInfo struct {
// Implements os.FileInfo
func (s *FileInfo) Name() string {
+ s.Lock()
_, name := filepath.Split(s.name)
+ s.Unlock()
return name
}
-func (s *FileInfo) Mode() os.FileMode { return s.mode }
-func (s *FileInfo) ModTime() time.Time { return s.modtime }
-func (s *FileInfo) IsDir() bool { return s.dir }
-func (s *FileInfo) Sys() interface{} { return nil }
+func (s *FileInfo) Mode() os.FileMode {
+ s.Lock()
+ defer s.Unlock()
+ return s.mode
+}
+func (s *FileInfo) ModTime() time.Time {
+ s.Lock()
+ defer s.Unlock()
+ return s.modtime
+}
+func (s *FileInfo) IsDir() bool {
+ s.Lock()
+ defer s.Unlock()
+ return s.dir
+}
+func (s *FileInfo) Sys() interface{} { return nil }
func (s *FileInfo) Size() int64 {
if s.IsDir() {
return int64(42)
}
+ s.Lock()
+ defer s.Unlock()
return int64(len(s.data))
}
diff --git a/vendor/github.com/spf13/afero/memmap.go b/vendor/github.com/spf13/afero/memmap.go
index 767ac1d..09498e7 100644
--- a/vendor/github.com/spf13/afero/memmap.go
+++ b/vendor/github.com/spf13/afero/memmap.go
@@ -66,7 +66,10 @@ func (m *MemMapFs) unRegisterWithParent(fileName string) error {
if parent == nil {
log.Panic("parent of ", f.Name(), " is nil")
}
+
+ parent.Lock()
mem.RemoveFromMemDir(parent, f)
+ parent.Unlock()
return nil
}
@@ -99,8 +102,10 @@ func (m *MemMapFs) registerWithParent(f *mem.FileData) {
}
}
+ parent.Lock()
mem.InitializeDir(parent)
mem.AddToMemDir(parent, f)
+ parent.Unlock()
}
func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
@@ -136,7 +141,7 @@ func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
m.registerWithParent(item)
m.mu.Unlock()
- m.Chmod(name, perm)
+ m.Chmod(name, perm|os.ModeDir)
return nil
}
@@ -146,9 +151,8 @@ func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
if err != nil {
if err.(*os.PathError).Err == ErrFileExists {
return nil
- } else {
- return err
}
+ return err
}
return nil
}
diff --git a/vendor/github.com/spf13/afero/memradix.go b/vendor/github.com/spf13/afero/memradix.go
deleted file mode 100644
index 87527f3..0000000
--- a/vendor/github.com/spf13/afero/memradix.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright © 2014 Steve Francia <spf@spf13.com>.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package afero
diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go
index dc504b4..8b8c208 100644
--- a/vendor/github.com/spf13/cast/cast.go
+++ b/vendor/github.com/spf13/cast/cast.go
@@ -151,3 +151,9 @@ func ToIntSlice(i interface{}) []int {
v, _ := ToIntSliceE(i)
return v
}
+
+// ToDurationSlice casts an interface to a []time.Duration type.
+func ToDurationSlice(i interface{}) []time.Duration {
+ v, _ := ToDurationSliceE(i)
+ return v
+}
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
index 4e75f64..81511fe 100644
--- a/vendor/github.com/spf13/cast/caste.go
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -1077,6 +1077,35 @@ func ToIntSliceE(i interface{}) ([]int, error) {
}
}
+// ToDurationSliceE casts an interface to a []time.Duration type.
+func ToDurationSliceE(i interface{}) ([]time.Duration, error) {
+ if i == nil {
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+
+ switch v := i.(type) {
+ case []time.Duration:
+ return v, nil
+ }
+
+ kind := reflect.TypeOf(i).Kind()
+ switch kind {
+ case reflect.Slice, reflect.Array:
+ s := reflect.ValueOf(i)
+ a := make([]time.Duration, s.Len())
+ for j := 0; j < s.Len(); j++ {
+ val, err := ToDurationE(s.Index(j).Interface())
+ if err != nil {
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+ a[j] = val
+ }
+ return a, nil
+ default:
+ return []time.Duration{}, fmt.Errorf("unable to cast %#v of type %T to []time.Duration", i, i)
+ }
+}
+
// StringToDate attempts to parse a string into a time.Time type using a
// predefined list of formats. If no suitable format is found, an error is
// returned.
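A quick sketch of the new duration-slice casts; anything `ToDurationE` accepts (duration strings, numbers, `time.Duration`) should convert element-wise:

```go
// Sketch: ToDurationSlice swallows conversion errors, ToDurationSliceE reports them.
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	ds := cast.ToDurationSlice([]string{"1s", "250ms", "2h45m"})
	fmt.Println(ds) // [1s 250ms 2h45m0s]

	if _, err := cast.ToDurationSliceE([]interface{}{"10s", "not-a-duration"}); err != nil {
		fmt.Println("conversion failed:", err)
	}
}
```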
diff --git a/vendor/github.com/spf13/jwalterweatherman/log_counter.go b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
index 570db1d..11423ac 100644
--- a/vendor/github.com/spf13/jwalterweatherman/log_counter.go
+++ b/vendor/github.com/spf13/jwalterweatherman/log_counter.go
@@ -27,7 +27,6 @@ func (c *logCounter) getCount() uint64 {
func (c *logCounter) Write(p []byte) (n int, err error) {
c.incr()
-
return len(p), nil
}
diff --git a/vendor/github.com/spf13/jwalterweatherman/notepad.go b/vendor/github.com/spf13/jwalterweatherman/notepad.go
index 5a623f4..ae5aaf7 100644
--- a/vendor/github.com/spf13/jwalterweatherman/notepad.go
+++ b/vendor/github.com/spf13/jwalterweatherman/notepad.go
@@ -9,7 +9,6 @@ import (
"fmt"
"io"
"log"
- "os"
)
type Threshold int
@@ -38,11 +37,7 @@ var prefixes map[Threshold]string = map[Threshold]string{
LevelFatal: "FATAL",
}
-func prefix(t Threshold) string {
- return t.String() + " "
-}
-
-// Notepad is where you leave a note !
+// Notepad is where you leave a note!
type Notepad struct {
TRACE *log.Logger
DEBUG *log.Logger
@@ -55,7 +50,7 @@ type Notepad struct {
LOG *log.Logger
FEEDBACK *Feedback
- loggers []**log.Logger
+ loggers [7]**log.Logger
logHandle io.Writer
outHandle io.Writer
logThreshold Threshold
@@ -71,11 +66,11 @@ type Notepad struct {
func NewNotepad(outThreshold Threshold, logThreshold Threshold, outHandle, logHandle io.Writer, prefix string, flags int) *Notepad {
n := &Notepad{}
- n.loggers = append(n.loggers, &n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL)
- n.logHandle = logHandle
+ n.loggers = [7]**log.Logger{&n.TRACE, &n.DEBUG, &n.INFO, &n.WARN, &n.ERROR, &n.CRITICAL, &n.FATAL}
n.outHandle = outHandle
- n.logThreshold = logThreshold
+ n.logHandle = logHandle
n.stdoutThreshold = outThreshold
+ n.logThreshold = logThreshold
if len(prefix) != 0 {
n.prefix = "[" + prefix + "] "
@@ -88,54 +83,48 @@ func NewNotepad(outThreshold Threshold, logThreshold Threshold, outHandle, logHa
n.LOG = log.New(n.logHandle,
"LOG: ",
n.flags)
-
- n.FEEDBACK = &Feedback{n}
+ n.FEEDBACK = &Feedback{out: log.New(outHandle, "", 0), log: n.LOG}
n.init()
-
return n
}
-// Feedback is special. It writes plainly to the output while
-// logging with the standard extra information (date, file, etc)
-// Only Println and Printf are currently provided for this
-type Feedback struct {
- *Notepad
-}
-
-// init create the loggers for each level depending on the notepad thresholds
+// init creates the loggers for each level depending on the notepad thresholds.
func (n *Notepad) init() {
- bothHandle := io.MultiWriter(n.outHandle, n.logHandle)
+ logAndOut := io.MultiWriter(n.outHandle, n.logHandle)
for t, logger := range n.loggers {
threshold := Threshold(t)
counter := &logCounter{}
n.logCounters[t] = counter
+ prefix := n.prefix + threshold.String() + " "
switch {
case threshold >= n.logThreshold && threshold >= n.stdoutThreshold:
- *logger = log.New(io.MultiWriter(counter, bothHandle), n.prefix+prefix(threshold), n.flags)
+ *logger = log.New(io.MultiWriter(counter, logAndOut), prefix, n.flags)
case threshold >= n.logThreshold:
- *logger = log.New(io.MultiWriter(counter, n.logHandle), n.prefix+prefix(threshold), n.flags)
+ *logger = log.New(io.MultiWriter(counter, n.logHandle), prefix, n.flags)
case threshold >= n.stdoutThreshold:
- *logger = log.New(io.MultiWriter(counter, os.Stdout), n.prefix+prefix(threshold), n.flags)
+ *logger = log.New(io.MultiWriter(counter, n.outHandle), prefix, n.flags)
default:
- *logger = log.New(counter, n.prefix+prefix(threshold), n.flags)
+ // counter doesn't care about prefix and flags, so don't use them
+ // for performance.
+ *logger = log.New(counter, "", 0)
}
}
}
-// SetLogThreshold change the threshold above which messages are written to the
-// log file
+// SetLogThreshold changes the threshold above which messages are written to the
+// log file.
func (n *Notepad) SetLogThreshold(threshold Threshold) {
n.logThreshold = threshold
n.init()
}
-// SetLogOutput change the file where log messages are written
+// SetLogOutput changes the file where log messages are written.
func (n *Notepad) SetLogOutput(handle io.Writer) {
n.logHandle = handle
n.init()
@@ -146,8 +135,8 @@ func (n *Notepad) GetLogThreshold() Threshold {
return n.logThreshold
}
-// SetStdoutThreshold change the threshold above which messages are written to the
-// standard output
+// SetStdoutThreshold changes the threshold above which messages are written to the
+// standard output.
func (n *Notepad) SetStdoutThreshold(threshold Threshold) {
n.stdoutThreshold = threshold
n.init()
@@ -158,8 +147,8 @@ func (n *Notepad) GetStdoutThreshold() Threshold {
return n.stdoutThreshold
}
-// SetPrefix change the prefix used by the notepad. Prefixes are displayed between
-// brackets at the begining of the line. An empty prefix won't be displayed at all.
+// SetPrefix changes the prefix used by the notepad. Prefixes are displayed between
+// brackets at the beginning of the line. An empty prefix won't be displayed at all.
func (n *Notepad) SetPrefix(prefix string) {
if len(prefix) != 0 {
n.prefix = "[" + prefix + "] "
@@ -176,20 +165,30 @@ func (n *Notepad) SetFlags(flags int) {
n.init()
}
-// Feedback is special. It writes plainly to the output while
-// logging with the standard extra information (date, file, etc)
-// Only Println and Printf are currently provided for this
+// Feedback writes plainly to the outHandle while
+// logging with the standard extra information (date, file, etc).
+type Feedback struct {
+ out *log.Logger
+ log *log.Logger
+}
+
func (fb *Feedback) Println(v ...interface{}) {
- s := fmt.Sprintln(v...)
- fmt.Print(s)
- fb.LOG.Output(2, s)
+ fb.output(fmt.Sprintln(v...))
}
-// Feedback is special. It writes plainly to the output while
-// logging with the standard extra information (date, file, etc)
-// Only Println and Printf are currently provided for this
func (fb *Feedback) Printf(format string, v ...interface{}) {
- s := fmt.Sprintf(format, v...)
- fmt.Print(s)
- fb.LOG.Output(2, s)
+ fb.output(fmt.Sprintf(format, v...))
+}
+
+func (fb *Feedback) Print(v ...interface{}) {
+ fb.output(fmt.Sprint(v...))
+}
+
+func (fb *Feedback) output(s string) {
+ if fb.out != nil {
+ fb.out.Output(2, s)
+ }
+ if fb.log != nil {
+ fb.log.Output(2, s)
+ }
}
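A sketch of the constructor shown above and the reworked `FEEDBACK` logger, which now writes plainly to the out handle and with the standard decoration to the log handle:

```go
// Sketch: a Notepad logging INFO and above to stdout, TRACE and above to a buffer.
package main

import (
	"bytes"
	"log"
	"os"

	jww "github.com/spf13/jwalterweatherman"
)

func main() {
	var logBuf bytes.Buffer

	n := jww.NewNotepad(jww.LevelInfo, jww.LevelTrace, os.Stdout, &logBuf, "app", log.Ldate|log.Ltime)

	n.DEBUG.Println("only reaches the log buffer (below the stdout threshold)")
	n.WARN.Println("reaches both stdout and the log buffer")
	n.FEEDBACK.Println("plain text on stdout, decorated entry in the log")
}
```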
diff --git a/vendor/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md
index eefb46d..b052414 100644
--- a/vendor/github.com/spf13/pflag/README.md
+++ b/vendor/github.com/spf13/pflag/README.md
@@ -246,6 +246,25 @@ It is possible to mark a flag as hidden, meaning it will still function as norma
flags.MarkHidden("secretFlag")
```
+## Disable sorting of flags
+`pflag` allows you to disable sorting of flags for help and usage message.
+
+**Example**:
+```go
+flags.BoolP("verbose", "v", false, "verbose output")
+flags.String("coolflag", "yeaah", "it's really cool flag")
+flags.Int("usefulflag", 777, "sometimes it's very useful")
+flags.SortFlags = false
+flags.PrintDefaults()
+```
+**Output**:
+```
+ -v, --verbose verbose output
+ --coolflag string it's really cool flag (default "yeaah")
+ --usefulflag int sometimes it's very useful (default 777)
+```
+
+
## Supporting Go flags when using pflag
In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
to support flags defined by third-party dependencies (e.g. `golang/glog`).
@@ -270,8 +289,8 @@ func main() {
You can see the full reference documentation of the pflag package
[at godoc.org][3], or through go's standard documentation system by
running `godoc -http=:6060` and browsing to
-[http://localhost:6060/pkg/github.com/ogier/pflag][2] after
+[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
installation.
-[2]: http://localhost:6060/pkg/github.com/ogier/pflag
-[3]: http://godoc.org/github.com/ogier/pflag
+[2]: http://localhost:6060/pkg/github.com/spf13/pflag
+[3]: http://godoc.org/github.com/spf13/pflag
diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go
index d22be41..aa126e4 100644
--- a/vendor/github.com/spf13/pflag/count.go
+++ b/vendor/github.com/spf13/pflag/count.go
@@ -11,13 +11,13 @@ func newCountValue(val int, p *int) *countValue {
}
func (i *countValue) Set(s string) error {
- v, err := strconv.ParseInt(s, 0, 64)
- // -1 means that no specific value was passed, so increment
- if v == -1 {
+ // "+1" means that no specific value was passed, so increment
+ if s == "+1" {
*i = countValue(*i + 1)
- } else {
- *i = countValue(v)
+ return nil
}
+ v, err := strconv.ParseInt(s, 0, 0)
+ *i = countValue(v)
return err
}
@@ -54,7 +54,7 @@ func (f *FlagSet) CountVar(p *int, name string, usage string) {
// CountVarP is like CountVar only take a shorthand for the flag name.
func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
- flag.NoOptDefVal = "-1"
+ flag.NoOptDefVal = "+1"
}
// CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set
@@ -83,7 +83,9 @@ func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
return p
}
-// Count like Count only the flag is placed on the CommandLine isntead of a given flag set
+// Count defines a count flag with specified name, default value, and usage string.
+// The return value is the address of an int variable that stores the value of the flag.
+// A count flag will add 1 to its value every time it is found on the command line
func Count(name string, usage string) *int {
return CommandLine.CountP(name, "", usage)
}
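A sketch of the count-flag behaviour described above: each occurrence of the shorthand applies the `+1` no-opt default, while an explicit value overrides the count:

```go
// Sketch: -vvv increments three times; --verbose=2 sets the value directly.
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	verbosity := fs.CountP("verbose", "v", "increase verbosity")
	_ = fs.Parse([]string{"-vvv"})
	fmt.Println(*verbosity) // 3

	fs2 := pflag.NewFlagSet("example", pflag.ContinueOnError)
	verbosity2 := fs2.CountP("verbose", "v", "increase verbosity")
	_ = fs2.Parse([]string{"--verbose=2"})
	fmt.Println(*verbosity2) // 2
}
```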
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
index 3a2e255..187e4c9 100644
--- a/vendor/github.com/spf13/pflag/flag.go
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -16,9 +16,9 @@ pflag is a drop-in replacement of Go's native flag package. If you import
pflag under the name "flag" then all code should continue to function
with no changes.
- import flag "github.com/ogier/pflag"
+ import flag "github.com/spf13/pflag"
- There is one exception to this: if you directly instantiate the Flag struct
+There is one exception to this: if you directly instantiate the Flag struct
there is one more field "Shorthand" that you will need to set.
Most code never instantiates this struct directly, and instead uses
functions such as String(), BoolVar(), and Var(), and is therefore
@@ -142,12 +142,13 @@ type FlagSet struct {
parsed bool
actual map[NormalizedName]*Flag
orderedActual []*Flag
+ sortedActual []*Flag
formal map[NormalizedName]*Flag
orderedFormal []*Flag
+ sortedFormal []*Flag
shorthands map[byte]*Flag
args []string // arguments after flags
argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
- exitOnError bool // does the program exit if there's an error?
errorHandling ErrorHandling
output io.Writer // nil means stderr; use out() accessor
interspersed bool // allow interspersed option/non-option args
@@ -200,12 +201,19 @@ func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
// "--getUrl" which may also be translated to "geturl" and everything will work.
func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
f.normalizeNameFunc = n
- for k, v := range f.orderedFormal {
- delete(f.formal, NormalizedName(v.Name))
- nname := f.normalizeFlagName(v.Name)
- v.Name = string(nname)
- f.formal[nname] = v
- f.orderedFormal[k] = v
+ f.sortedFormal = f.sortedFormal[:0]
+ for fname, flag := range f.formal {
+ nname := f.normalizeFlagName(flag.Name)
+ if fname == nname {
+ continue
+ }
+ flag.Name = string(nname)
+ delete(f.formal, fname)
+ f.formal[nname] = flag
+ if _, set := f.actual[fname]; set {
+ delete(f.actual, fname)
+ f.actual[nname] = flag
+ }
}
}
@@ -240,9 +248,16 @@ func (f *FlagSet) SetOutput(output io.Writer) {
// in primordial order if f.SortFlags is false, calling fn for each.
// It visits all flags, even those not set.
func (f *FlagSet) VisitAll(fn func(*Flag)) {
+ if len(f.formal) == 0 {
+ return
+ }
+
var flags []*Flag
if f.SortFlags {
- flags = sortFlags(f.formal)
+ if len(f.formal) != len(f.sortedFormal) {
+ f.sortedFormal = sortFlags(f.formal)
+ }
+ flags = f.sortedFormal
} else {
flags = f.orderedFormal
}
@@ -279,9 +294,16 @@ func VisitAll(fn func(*Flag)) {
// in primordial order if f.SortFlags is false, calling fn for each.
// It visits only those flags that have been set.
func (f *FlagSet) Visit(fn func(*Flag)) {
+ if len(f.actual) == 0 {
+ return
+ }
+
var flags []*Flag
if f.SortFlags {
- flags = sortFlags(f.actual)
+ if len(f.actual) != len(f.sortedActual) {
+ f.sortedActual = sortFlags(f.actual)
+ }
+ flags = f.sortedActual
} else {
flags = f.orderedActual
}
@@ -303,6 +325,22 @@ func (f *FlagSet) Lookup(name string) *Flag {
return f.lookup(f.normalizeFlagName(name))
}
+// ShorthandLookup returns the Flag structure of the short handed flag,
+// returning nil if none exists.
+// It panics, if len(name) > 1.
+func (f *FlagSet) ShorthandLookup(name string) *Flag {
+ if name == "" {
+ return nil
+ }
+ if len(name) > 1 {
+ msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
+ fmt.Fprintf(f.out(), msg)
+ panic(msg)
+ }
+ c := name[0]
+ return f.shorthands[c]
+}
+
// lookup returns the Flag structure of the named flag, returning nil if none exists.
func (f *FlagSet) lookup(name NormalizedName) *Flag {
return f.formal[name]
@@ -344,7 +382,7 @@ func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
if flag == nil {
return fmt.Errorf("flag %q does not exist", name)
}
- if len(usageMessage) == 0 {
+ if usageMessage == "" {
return fmt.Errorf("deprecated message for flag %q must be set", name)
}
flag.Deprecated = usageMessage
@@ -359,7 +397,7 @@ func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) erro
if flag == nil {
return fmt.Errorf("flag %q does not exist", name)
}
- if len(usageMessage) == 0 {
+ if usageMessage == "" {
return fmt.Errorf("deprecated message for flag %q must be set", name)
}
flag.ShorthandDeprecated = usageMessage
@@ -383,6 +421,12 @@ func Lookup(name string) *Flag {
return CommandLine.Lookup(name)
}
+// ShorthandLookup returns the Flag structure of the short handed flag,
+// returning nil if none exists.
+func ShorthandLookup(name string) *Flag {
+ return CommandLine.ShorthandLookup(name)
+}
+
// Set sets the value of the named flag.
func (f *FlagSet) Set(name, value string) error {
normalName := f.normalizeFlagName(name)
@@ -390,18 +434,30 @@ func (f *FlagSet) Set(name, value string) error {
if !ok {
return fmt.Errorf("no such flag -%v", name)
}
+
err := flag.Value.Set(value)
if err != nil {
- return err
+ var flagName string
+ if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
+ flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
+ } else {
+ flagName = fmt.Sprintf("--%s", flag.Name)
+ }
+ return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
}
- if f.actual == nil {
- f.actual = make(map[NormalizedName]*Flag)
+
+ if !flag.Changed {
+ if f.actual == nil {
+ f.actual = make(map[NormalizedName]*Flag)
+ }
+ f.actual[normalName] = flag
+ f.orderedActual = append(f.orderedActual, flag)
+
+ flag.Changed = true
}
- f.actual[normalName] = flag
- f.orderedActual = append(f.orderedActual, flag)
- flag.Changed = true
- if len(flag.Deprecated) > 0 {
- fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
+
+ if flag.Deprecated != "" {
+ fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
}
return nil
}
@@ -508,6 +564,10 @@ func UnquoteUsage(flag *Flag) (name string, usage string) {
name = "int"
case "uint64":
name = "uint"
+ case "stringSlice":
+ name = "strings"
+ case "intSlice":
+ name = "ints"
}
return
@@ -583,28 +643,28 @@ func wrap(i, w int, s string) string {
// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
// wrapping)
func (f *FlagSet) FlagUsagesWrapped(cols int) string {
- x := new(bytes.Buffer)
+ buf := new(bytes.Buffer)
lines := make([]string, 0, len(f.formal))
maxlen := 0
f.VisitAll(func(flag *Flag) {
- if len(flag.Deprecated) > 0 || flag.Hidden {
+ if flag.Deprecated != "" || flag.Hidden {
return
}
line := ""
- if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
+ if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name)
} else {
line = fmt.Sprintf(" --%s", flag.Name)
}
varname, usage := UnquoteUsage(flag)
- if len(varname) > 0 {
+ if varname != "" {
line += " " + varname
}
- if len(flag.NoOptDefVal) > 0 {
+ if flag.NoOptDefVal != "" {
switch flag.Value.Type() {
case "string":
line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
@@ -612,6 +672,10 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string {
if flag.NoOptDefVal != "true" {
line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
}
+ case "count":
+ if flag.NoOptDefVal != "+1" {
+ line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
+ }
default:
line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
}
@@ -640,10 +704,10 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string {
sidx := strings.Index(line, "\x00")
spacing := strings.Repeat(" ", maxlen-sidx)
// maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
- fmt.Fprintln(x, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
+ fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
}
- return x.String()
+ return buf.String()
}
// FlagUsages returns a string containing the usage information for all flags in
@@ -740,11 +804,10 @@ func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
// AddFlag will add the flag to the FlagSet
func (f *FlagSet) AddFlag(flag *Flag) {
- // Call normalizeFlagName function only once
normalizedFlagName := f.normalizeFlagName(flag.Name)
- _, alreadythere := f.formal[normalizedFlagName]
- if alreadythere {
+ _, alreadyThere := f.formal[normalizedFlagName]
+ if alreadyThere {
msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
fmt.Fprintln(f.out(), msg)
panic(msg) // Happens only if flags are declared with identical names
@@ -757,27 +820,29 @@ func (f *FlagSet) AddFlag(flag *Flag) {
f.formal[normalizedFlagName] = flag
f.orderedFormal = append(f.orderedFormal, flag)
- if len(flag.Shorthand) == 0 {
+ if flag.Shorthand == "" {
return
}
if len(flag.Shorthand) > 1 {
- fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand)
- panic("shorthand is more than one character")
+ msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
+ fmt.Fprintf(f.out(), msg)
+ panic(msg)
}
if f.shorthands == nil {
f.shorthands = make(map[byte]*Flag)
}
c := flag.Shorthand[0]
- old, alreadythere := f.shorthands[c]
- if alreadythere {
- fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name)
- panic("shorthand redefinition")
+ used, alreadyThere := f.shorthands[c]
+ if alreadyThere {
+ msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
+ fmt.Fprintf(f.out(), msg)
+ panic(msg)
}
f.shorthands[c] = flag
}
// AddFlagSet adds one FlagSet to another. If a flag is already present in f
-// the flag from newSet will be ignored
+// the flag from newSet will be ignored.
func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
if newSet == nil {
return
@@ -825,35 +890,6 @@ func (f *FlagSet) usage() {
}
}
-func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error {
- if err := flag.Value.Set(value); err != nil {
- return f.failf("invalid argument %q for %s: %v", value, origArg, err)
- }
- // mark as visited for Visit()
- if f.actual == nil {
- f.actual = make(map[NormalizedName]*Flag)
- }
- f.actual[f.normalizeFlagName(flag.Name)] = flag
- f.orderedActual = append(f.orderedActual, flag)
- flag.Changed = true
- if len(flag.Deprecated) > 0 {
- fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
- }
- if len(flag.ShorthandDeprecated) > 0 && containsShorthand(origArg, flag.Shorthand) {
- fmt.Fprintf(os.Stderr, "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
- }
- return nil
-}
-
-func containsShorthand(arg, shorthand string) bool {
- // filter out flags --<flag_name>
- if strings.HasPrefix(arg, "-") {
- return false
- }
- arg = strings.SplitN(arg, "=", 2)[0]
- return strings.Contains(arg, shorthand)
-}
-
func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
a = args
name := s[2:]
@@ -861,10 +897,11 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
err = f.failf("bad flag syntax: %s", s)
return
}
+
split := strings.SplitN(name, "=", 2)
name = split[0]
- flag, alreadythere := f.formal[f.normalizeFlagName(name)]
- if !alreadythere {
+ flag, exists := f.formal[f.normalizeFlagName(name)]
+ if !exists {
if name == "help" { // special case for nice help message.
f.usage()
return a, ErrHelp
@@ -872,11 +909,12 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
err = f.failf("unknown flag: --%s", name)
return
}
+
var value string
if len(split) == 2 {
// '--flag=arg'
value = split[1]
- } else if len(flag.NoOptDefVal) > 0 {
+ } else if flag.NoOptDefVal != "" {
// '--flag' (arg was optional)
value = flag.NoOptDefVal
} else if len(a) > 0 {
@@ -888,7 +926,11 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
err = f.failf("flag needs an argument: %s", s)
return
}
- err = fn(flag, value, s)
+
+ err = fn(flag, value)
+ if err != nil {
+ f.failf(err.Error())
+ }
return
}
@@ -896,38 +938,52 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse
if strings.HasPrefix(shorthands, "test.") {
return
}
+
outArgs = args
outShorts = shorthands[1:]
c := shorthands[0]
- flag, alreadythere := f.shorthands[c]
- if !alreadythere {
+ flag, exists := f.shorthands[c]
+ if !exists {
if c == 'h' { // special case for nice help message.
f.usage()
err = ErrHelp
return
}
- //TODO continue on error
err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
return
}
+
var value string
if len(shorthands) > 2 && shorthands[1] == '=' {
+ // '-f=arg'
value = shorthands[2:]
outShorts = ""
- } else if len(flag.NoOptDefVal) > 0 {
+ } else if flag.NoOptDefVal != "" {
+ // '-f' (arg was optional)
value = flag.NoOptDefVal
} else if len(shorthands) > 1 {
+ // '-farg'
value = shorthands[1:]
outShorts = ""
} else if len(args) > 0 {
+ // '-f arg'
value = args[0]
outArgs = args[1:]
} else {
+ // '-f' (arg was required)
err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
return
}
- err = fn(flag, value, shorthands)
+
+ if flag.ShorthandDeprecated != "" {
+ fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
+ }
+
+ err = fn(flag, value)
+ if err != nil {
+ f.failf(err.Error())
+ }
return
}
@@ -935,6 +991,7 @@ func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []stri
a = args
shorthands := s[1:]
+ // "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
for len(shorthands) > 0 {
shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
if err != nil {
@@ -982,13 +1039,18 @@ func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
// The return value will be ErrHelp if -help was set but not defined.
func (f *FlagSet) Parse(arguments []string) error {
f.parsed = true
+
+ if len(arguments) == 0 {
+ return nil
+ }
+
f.args = make([]string, 0, len(arguments))
- assign := func(flag *Flag, value, origArg string) error {
- return f.setFlag(flag, value, origArg)
+ set := func(flag *Flag, value string) error {
+ return f.Set(flag.Name, value)
}
- err := f.parseArgs(arguments, assign)
+ err := f.parseArgs(arguments, set)
if err != nil {
switch f.errorHandling {
case ContinueOnError:
@@ -1002,7 +1064,7 @@ func (f *FlagSet) Parse(arguments []string) error {
return nil
}
-type parseFunc func(flag *Flag, value, origArg string) error
+type parseFunc func(flag *Flag, value string) error
// ParseAll parses flag definitions from the argument list, which should not
// include the command name. The arguments for fn are flag and value. Must be
@@ -1013,11 +1075,7 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string)
f.parsed = true
f.args = make([]string, 0, len(arguments))
- assign := func(flag *Flag, value, origArg string) error {
- return fn(flag, value)
- }
-
- err := f.parseArgs(arguments, assign)
+ err := f.parseArgs(arguments, fn)
if err != nil {
switch f.errorHandling {
case ContinueOnError:
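A sketch of the `ShorthandLookup` helper introduced above:

```go
// Sketch: looking up flags by their one-character shorthand.
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	fs.BoolP("verbose", "v", false, "verbose output")

	if f := fs.ShorthandLookup("v"); f != nil {
		fmt.Printf("-%s is shorthand for --%s\n", f.Shorthand, f.Name)
	}
	if fs.ShorthandLookup("x") == nil {
		fmt.Println("no flag registered for -x")
	}
}
```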
diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go
new file mode 100644
index 0000000..f1a01d0
--- /dev/null
+++ b/vendor/github.com/spf13/pflag/int16.go
@@ -0,0 +1,88 @@
+package pflag
+
+import "strconv"
+
+// -- int16 Value
+type int16Value int16
+
+func newInt16Value(val int16, p *int16) *int16Value {
+ *p = val
+ return (*int16Value)(p)
+}
+
+func (i *int16Value) Set(s string) error {
+ v, err := strconv.ParseInt(s, 0, 16)
+ *i = int16Value(v)
+ return err
+}
+
+func (i *int16Value) Type() string {
+ return "int16"
+}
+
+func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) }
+
+func int16Conv(sval string) (interface{}, error) {
+ v, err := strconv.ParseInt(sval, 0, 16)
+ if err != nil {
+ return 0, err
+ }
+ return int16(v), nil
+}
+
+// GetInt16 returns the int16 value of a flag with the given name
+func (f *FlagSet) GetInt16(name string) (int16, error) {
+ val, err := f.getFlagType(name, "int16", int16Conv)
+ if err != nil {
+ return 0, err
+ }
+ return val.(int16), nil
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ f.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16Var defines an int16 flag with specified name, default value, and usage string.
+// The argument p points to an int16 variable in which to store the value of the flag.
+func Int16Var(p *int16, name string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, "", usage)
+}
+
+// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
+func Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
+ CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage)
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func (f *FlagSet) Int16(name string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, "", value, usage)
+ return p
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 {
+ p := new(int16)
+ f.Int16VarP(p, name, shorthand, value, usage)
+ return p
+}
+
+// Int16 defines an int16 flag with specified name, default value, and usage string.
+// The return value is the address of an int16 variable that stores the value of the flag.
+func Int16(name string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, "", value, usage)
+}
+
+// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
+func Int16P(name, shorthand string, value int16, usage string) *int16 {
+ return CommandLine.Int16P(name, shorthand, value, usage)
+}
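A sketch of the new int16 flag helpers; values outside the int16 range are rejected by `strconv.ParseInt(s, 0, 16)` and surface as a parse error:

```go
// Sketch: defining and parsing an int16 flag with a shorthand.
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
	port := fs.Int16P("port", "p", 8080, "listen port")

	if err := fs.Parse([]string{"-p", "9090"}); err != nil {
		panic(err)
	}
	fmt.Println(*port) // 9090
	// e.g. --port=70000 overflows int16 and Parse returns an error instead.
}
```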
diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md
index 25181df..848d92d 100644
--- a/vendor/github.com/spf13/viper/README.md
+++ b/vendor/github.com/spf13/viper/README.md
@@ -6,18 +6,19 @@ Many Go projects are built using Viper including:
* [Hugo](http://gohugo.io)
* [EMC RexRay](http://rexray.readthedocs.org/en/stable/)
-* [Imgur's Incus](https://github.com/Imgur/incus)
+* [Imgur’s Incus](https://github.com/Imgur/incus)
* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack)
* [Docker Notary](https://github.com/docker/Notary)
* [BloomApi](https://www.bloomapi.com/)
* [doctl](https://github.com/digitalocean/doctl)
+* [Clairctl](https://github.com/jgsqware/clairctl)
[![Build Status](https://travis-ci.org/spf13/viper.svg)](https://travis-ci.org/spf13/viper) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) [![GoDoc](https://godoc.org/github.com/spf13/viper?status.svg)](https://godoc.org/github.com/spf13/viper)
## What is Viper?
-Viper is a complete configuration solution for go applications including 12 factor apps. It is designed
+Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed
to work within an application, and can handle all types of configuration needs
and formats. It supports:
@@ -68,7 +69,7 @@ Viper configuration keys are case insensitive.
### Establishing Defaults
A good configuration system will support default values. A default value is not
-required for a key, but it's useful in the event that a key hasn’t been set via
+required for a key, but it’s useful in the event that a key hasn’t been set via
config file, environment variable, remote configuration or flag.
Examples:
@@ -116,10 +117,10 @@ Optionally you can provide a function for Viper to run each time a change occurs
**Make sure you add all of the configPaths prior to calling `WatchConfig()`**
```go
- viper.WatchConfig()
- viper.OnConfigChange(func(e fsnotify.Event) {
- fmt.Println("Config file changed:", e.Name)
- })
+viper.WatchConfig()
+viper.OnConfigChange(func(e fsnotify.Event) {
+ fmt.Println("Config file changed:", e.Name)
+})
```
### Reading Config from io.Reader
@@ -236,7 +237,7 @@ Like `BindEnv`, the value is not set when the binding method is called, but when
it is accessed. This means you can bind as early as you want, even in an
`init()` function.
-The `BindPFlag()` method provides this functionality.
+For individual flags, the `BindPFlag()` method provides this functionality.
Example:
@@ -245,6 +246,19 @@ serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
viper.BindPFlag("port", serverCmd.Flags().Lookup("port"))
```
+You can also bind an existing set of pflags (pflag.FlagSet):
+
+Example:
+
+```go
+pflag.Int("flagname", 1234, "help message for flagname")
+
+pflag.Parse()
+viper.BindPFlags(pflag.CommandLine)
+
+i := viper.GetInt("flagname") // retrieve values from viper instead of pflag
+```
+
The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude
the use of other packages that use the [flag](https://golang.org/pkg/flag/)
package from the standard library. The pflag package can handle the flags
@@ -263,15 +277,23 @@ import (
)
func main() {
+
+ // using standard library "flag" package
+ flag.Int("flagname", 1234, "help message for flagname")
+
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
- ...
+ viper.BindPFlags(pflag.CommandLine)
+
+ i := viper.GetInt("flagname") // retrieve value from viper
+
+ ...
}
```
#### Flag interfaces
-Viper provides two Go interfaces to bind other flag systems if you don't use `Pflags`.
+Viper provides two Go interfaces to bind other flag systems if you don’t use `Pflags`.
`FlagValue` represents a single flag. This is a very simple example on how to implement this interface:
@@ -401,7 +423,7 @@ go func(){
## Getting Values From Viper
-In Viper, there are a few ways to get a value depending on the value's type.
+In Viper, there are a few ways to get a value depending on the value’s type.
The following functions and methods exist:
* `Get(key string) : interface{}`
@@ -531,7 +553,7 @@ func NewCache(cfg *Viper) *Cache {...}
```
which creates a cache based on config information formatted as `subv`.
-Now it's easy to create these 2 caches separately as:
+Now it’s easy to create these 2 caches separately as:
```go
cfg1 := viper.Sub("app.cache1")
@@ -575,13 +597,13 @@ initialization needed to begin using Viper. Since most applications will want
to use a single central repository for their configuration, the viper package
provides this. It is similar to a singleton.
-In all of the examples above, they demonstrate using viper in it's singleton
+In all of the examples above, they demonstrate using viper in its singleton
style approach.
### Working with multiple vipers
You can also create many different vipers for use in your application. Each will
-have it’s own unique set of configurations and values. Each can read from a
+have its own unique set of configurations and values. Each can read from a
different config file, key value store, etc. All of the functions that viper
package supports are mirrored as methods on a viper.
diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go
index 5ca66ae..963861a 100644
--- a/vendor/github.com/spf13/viper/viper.go
+++ b/vendor/github.com/spf13/viper/viper.go
@@ -21,6 +21,7 @@ package viper
import (
"bytes"
+ "encoding/csv"
"fmt"
"io"
"log"
@@ -52,7 +53,7 @@ func init() {
type remoteConfigFactory interface {
Get(rp RemoteProvider) (io.Reader, error)
Watch(rp RemoteProvider) (io.Reader, error)
- WatchChannel(rp RemoteProvider)(<-chan *RemoteResponse, chan bool)
+ WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool)
}
// RemoteConfig is optional, see the remote package
@@ -68,8 +69,7 @@ func (str UnsupportedConfigError) Error() string {
}
// UnsupportedRemoteProviderError denotes encountering an unsupported remote
-// provider. Currently only etcd and Consul are
-// supported.
+// provider. Currently only etcd and Consul are supported.
type UnsupportedRemoteProviderError string
// Error returns the formatted remote provider error.
@@ -282,8 +282,8 @@ func (v *Viper) WatchConfig() {
}()
}
-// SetConfigFile explicitly defines the path, name and extension of the config file
-// Viper will use this and not check any of the config paths
+// SetConfigFile explicitly defines the path, name and extension of the config file.
+// Viper will use this and not check any of the config paths.
func SetConfigFile(in string) { v.SetConfigFile(in) }
func (v *Viper) SetConfigFile(in string) {
if in != "" {
@@ -292,8 +292,8 @@ func (v *Viper) SetConfigFile(in string) {
}
// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use.
-// E.g. if your prefix is "spf", the env registry
-// will look for env. variables that start with "SPF_"
+// E.g. if your prefix is "spf", the env registry will look for env
+// variables that start with "SPF_".
func SetEnvPrefix(in string) { v.SetEnvPrefix(in) }
func (v *Viper) SetEnvPrefix(in string) {
if in != "" {
@@ -311,11 +311,11 @@ func (v *Viper) mergeWithEnvPrefix(in string) string {
// TODO: should getEnv logic be moved into find(). Can generalize the use of
// rewriting keys many things, Ex: Get('someKey') -> some_key
-// (cammel case to snake case for JSON keys perhaps)
+// (camel case to snake case for JSON keys perhaps)
// getEnv is a wrapper around os.Getenv which replaces characters in the original
-// key. This allows env vars which have different keys then the config object
-// keys
+// key. This allows env vars which have different keys than the config object
+// keys.
func (v *Viper) getEnv(key string) string {
if v.envKeyReplacer != nil {
key = v.envKeyReplacer.Replace(key)
@@ -323,7 +323,7 @@ func (v *Viper) getEnv(key string) string {
return os.Getenv(key)
}
-// ConfigFileUsed returns the file used to populate the config registry
+// ConfigFileUsed returns the file used to populate the config registry.
func ConfigFileUsed() string { return v.ConfigFileUsed() }
func (v *Viper) ConfigFileUsed() string { return v.configFile }
@@ -596,32 +596,33 @@ func (v *Viper) Get(key string) interface{} {
return nil
}
- valType := val
if v.typeByDefValue {
// TODO(bep) this branch isn't covered by a single test.
+ valType := val
path := strings.Split(lcaseKey, v.keyDelim)
defVal := v.searchMap(v.defaults, path)
if defVal != nil {
valType = defVal
}
- }
- switch valType.(type) {
- case bool:
- return cast.ToBool(val)
- case string:
- return cast.ToString(val)
- case int64, int32, int16, int8, int:
- return cast.ToInt(val)
- case float64, float32:
- return cast.ToFloat64(val)
- case time.Time:
- return cast.ToTime(val)
- case time.Duration:
- return cast.ToDuration(val)
- case []string:
- return cast.ToStringSlice(val)
+ switch valType.(type) {
+ case bool:
+ return cast.ToBool(val)
+ case string:
+ return cast.ToString(val)
+ case int64, int32, int16, int8, int:
+ return cast.ToInt(val)
+ case float64, float32:
+ return cast.ToFloat64(val)
+ case time.Time:
+ return cast.ToTime(val)
+ case time.Duration:
+ return cast.ToDuration(val)
+ case []string:
+ return cast.ToStringSlice(val)
+ }
}
+
return val
}
@@ -746,13 +747,16 @@ func (v *Viper) Unmarshal(rawVal interface{}) error {
}
// defaultDecoderConfig returns default mapstructure.DecoderConfig with support
-// of time.Duration values
+// of time.Duration values & string slices
func defaultDecoderConfig(output interface{}) *mapstructure.DecoderConfig {
return &mapstructure.DecoderConfig{
Metadata: nil,
Result: output,
WeaklyTypedInput: true,
- DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
+ DecodeHook: mapstructure.ComposeDecodeHookFunc(
+ mapstructure.StringToTimeDurationHookFunc(),
+ mapstructure.StringToSliceHookFunc(","),
+ ),
}
}
@@ -813,7 +817,7 @@ func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) {
}
// BindFlagValue binds a specific key to a FlagValue.
-// Example(where serverCmd is a Cobra instance):
+// Example (where serverCmd is a Cobra instance):
//
// serverCmd.Flags().Int("port", 1138, "Port to run Application server on")
// Viper.BindFlagValue("port", serverCmd.Flags().Lookup("port"))
@@ -894,7 +898,9 @@ func (v *Viper) find(lcaseKey string) interface{} {
return cast.ToBool(flag.ValueString())
case "stringSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
- return strings.TrimSuffix(s, "]")
+ s = strings.TrimSuffix(s, "]")
+ res, _ := readAsCSV(s)
+ return res
default:
return flag.ValueString()
}
@@ -961,7 +967,9 @@ func (v *Viper) find(lcaseKey string) interface{} {
return cast.ToBool(flag.ValueString())
case "stringSlice":
s := strings.TrimPrefix(flag.ValueString(), "[")
- return strings.TrimSuffix(s, "]")
+ s = strings.TrimSuffix(s, "]")
+ res, _ := readAsCSV(s)
+ return res
default:
return flag.ValueString()
}
@@ -971,6 +979,15 @@ func (v *Viper) find(lcaseKey string) interface{} {
return nil
}
+func readAsCSV(val string) ([]string, error) {
+ if val == "" {
+ return []string{}, nil
+ }
+ stringReader := strings.NewReader(val)
+ csvReader := csv.NewReader(stringReader)
+ return csvReader.Read()
+}
+
// IsSet checks to see if the key has been set in any of the data locations.
// IsSet is case-insensitive for a key.
func IsSet(key string) bool { return v.IsSet(key) }
@@ -1273,7 +1290,7 @@ func (v *Viper) WatchRemoteConfigOnChannel() error {
return v.watchKeyValueConfigOnChannel()
}
-// Unmarshall a Reader into a map.
+// Unmarshal a Reader into a map.
// Should probably be an unexported function.
func unmarshalReader(in io.Reader, c map[string]interface{}) error {
return v.unmarshalReader(in, c)
@@ -1532,7 +1549,6 @@ func (v *Viper) searchInPath(in string) (filename string) {
// Search all configPaths for any config file.
// Returns the first path that exists (and is a config file).
func (v *Viper) findConfigFile() (string, error) {
-
jww.INFO.Println("Searching for config in ", v.configPaths)
for _, cp := range v.configPaths {
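With `readAsCSV` above, a bound `stringSlice` pflag now comes back from Viper as a real `[]string` rather than a bracketed string. A sketch assuming the usual `BindPFlag`/`GetStringSlice` API from the viper README:

```go
// Sketch: bind a stringSlice pflag and read it back as a []string.
package main

import (
	"fmt"

	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

func main() {
	pflag.StringSlice("tags", []string{"alpha", "beta"}, "tags to apply")
	pflag.Parse()

	if err := viper.BindPFlag("tags", pflag.Lookup("tags")); err != nil {
		panic(err)
	}

	tags := viper.GetStringSlice("tags") // --tags=a,b,c yields []string{"a", "b", "c"}
	fmt.Println(tags)
}
```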
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go b/vendor/github.com/stretchr/testify/assert/assertion_format.go
new file mode 100644
index 0000000..23838c4
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go
@@ -0,0 +1,379 @@
+/*
+* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
+* THIS FILE MUST NOT BE EDITED BY HAND
+ */
+
+package assert
+
+import (
+ http "net/http"
+ url "net/url"
+ time "time"
+)
+
+// Conditionf uses a Comparison to assert a complex condition.
+func Conditionf(t TestingT, comp Comparison, msg string, args ...interface{}) bool {
+ return Condition(t, comp, append([]interface{}{msg}, args...)...)
+}
+
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// assert.Containsf(t, "Hello World", "World", "error message %s", "formatted")
+// assert.Containsf(t, ["Hello", "World"], "World", "error message %s", "formatted")
+// assert.Containsf(t, {"Hello": "World"}, "Hello", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Containsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ return Contains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// assert.Emptyf(t, obj, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ return Empty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// assert.Equalf(t, 123, 123, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return Equal(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// assert.EqualErrorf(t, err, expectedErrorString, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualErrorf(t TestingT, theError error, errString string, msg string, args ...interface{}) bool {
+ return EqualError(t, theError, errString, append([]interface{}{msg}, args...)...)
+}
+
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return EqualValues(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.Errorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
+ return Error(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// assert.Exactlyf(t, int32(123), int64(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Exactlyf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return Exactly(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Failf reports a failure through the provided TestingT.
+func Failf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+ return Fail(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// FailNowf fails the test immediately.
+func FailNowf(t TestingT, failureMessage string, msg string, args ...interface{}) bool {
+ return FailNow(t, failureMessage, append([]interface{}{msg}, args...)...)
+}
+
+// Falsef asserts that the specified value is false.
+//
+// assert.Falsef(t, myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Falsef(t TestingT, value bool, msg string, args ...interface{}) bool {
+ return False(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// assert.HTTPBodyContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
+ return HTTPBodyContains(t, handler, method, url, values, str)
+}
+
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// assert.HTTPBodyNotContainsf(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPBodyNotContainsf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
+ return HTTPBodyNotContains(t, handler, method, url, values, str)
+}
+
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// assert.HTTPErrorf(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPErrorf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) bool {
+ return HTTPError(t, handler, method, url, values)
+}
+
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// assert.HTTPRedirectf(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPRedirectf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) bool {
+ return HTTPRedirect(t, handler, method, url, values)
+}
+
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// assert.HTTPSuccessf(t, myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func HTTPSuccessf(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) bool {
+ return HTTPSuccess(t, handler, method, url, values)
+}
+
+// Implementsf asserts that an object implements the specified interface.
+//
+// assert.Implementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func Implementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ return Implements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// assert.InDeltaf(t, math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InDeltaf(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDelta(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func InDeltaSlicef(t TestingT, expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDeltaSlice(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func InEpsilonf(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ return InEpsilon(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func InEpsilonSlicef(t TestingT, expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ return InEpsilonSlice(t, expected, actual, epsilon, append([]interface{}{msg}, args...)...)
+}
+
+// IsTypef asserts that the specified objects are of the same type.
+func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ return IsType(t, expectedType, object, append([]interface{}{msg}, args...)...)
+}
+
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// assert.JSONEqf(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func JSONEqf(t TestingT, expected string, actual string, msg string, args ...interface{}) bool {
+ return JSONEq(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// Lenf asserts that the specified object has the expected length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// assert.Lenf(t, mySlice, 3, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Lenf(t TestingT, object interface{}, length int, msg string, args ...interface{}) bool {
+ return Len(t, object, length, append([]interface{}{msg}, args...)...)
+}
+
+// Nilf asserts that the specified object is nil.
+//
+// assert.Nilf(t, err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Nilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ return Nil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if assert.NoErrorf(t, err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NoErrorf(t TestingT, err error, msg string, args ...interface{}) bool {
+ return NoError(t, err, append([]interface{}{msg}, args...)...)
+}
+
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// assert.NotContainsf(t, "Hello World", "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, ["Hello", "World"], "Earth", "error message %s", "formatted")
+// assert.NotContainsf(t, {"Hello": "World"}, "Earth", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotContainsf(t TestingT, s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ return NotContains(t, s, contains, append([]interface{}{msg}, args...)...)
+}
+
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotEmptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ return NotEmpty(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// assert.NotEqualf(t, obj1, obj2, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func NotEqualf(t TestingT, expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return NotEqual(t, expected, actual, append([]interface{}{msg}, args...)...)
+}
+
+// NotNilf asserts that the specified object is not nil.
+//
+// assert.NotNilf(t, err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotNilf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
+ return NotNil(t, object, append([]interface{}{msg}, args...)...)
+}
+
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// assert.NotPanicsf(t, func(){ RemainCalm() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotPanicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+ return NotPanics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// assert.NotRegexpf(t, regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// assert.NotRegexpf(t, "^start", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotRegexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ return NotRegexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ return NotSubset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// NotZerof asserts that i is not the zero value for its type and returns the truth.
+func NotZerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+ return NotZero(t, i, append([]interface{}{msg}, args...)...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// assert.Panicsf(t, func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Panicsf(t TestingT, f PanicTestFunc, msg string, args ...interface{}) bool {
+ return Panics(t, f, append([]interface{}{msg}, args...)...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValuef(t, "crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func PanicsWithValuef(t TestingT, expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+ return PanicsWithValue(t, expected, f, append([]interface{}{msg}, args...)...)
+}
+
+// Regexpf asserts that a specified regexp matches a string.
+//
+// assert.Regexpf(t, regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// assert.Regexpf(t, "start...$", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Regexpf(t TestingT, rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ return Regexp(t, rx, str, append([]interface{}{msg}, args...)...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ return Subset(t, list, subset, append([]interface{}{msg}, args...)...)
+}
+
+// Truef asserts that the specified value is true.
+//
+// assert.Truef(t, myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Truef(t TestingT, value bool, msg string, args ...interface{}) bool {
+ return True(t, value, append([]interface{}{msg}, args...)...)
+}
+
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// assert.WithinDurationf(t, time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func WithinDurationf(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+ return WithinDuration(t, expected, actual, delta, append([]interface{}{msg}, args...)...)
+}
+
+// Zerof asserts that i is the zero value for its type and returns the truth.
+func Zerof(t TestingT, i interface{}, msg string, args ...interface{}) bool {
+ return Zero(t, i, append([]interface{}{msg}, args...)...)
+}
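Every generated f-suffixed assertion above prepends its format string to the variadic arguments and forwards to the unformatted function, so failure messages go through the existing msgAndArgs handling. A short usage sketch in an ordinary test (the test name and values are illustrative):

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestFormattedAssertions(t *testing.T) {
        got := 6 * 7
        // On failure the message is rendered from the format string and args,
        // e.g. "got 41, want 42".
        assert.Equalf(t, 42, got, "got %d, want %d", got, 42)
        assert.Containsf(t, []string{"alpha", "beta"}, "beta", "missing %q", "beta")
        assert.NotNilf(t, &got, "pointer to %d should not be nil", got)
    }
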
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
new file mode 100644
index 0000000..c5cc66f
--- /dev/null
+++ b/vendor/github.com/stretchr/testify/assert/assertion_format.go.tmpl
@@ -0,0 +1,4 @@
+{{.CommentFormat}}
+func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
+ return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
+}
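The four-line template above is all the codegen needs: a doc comment, an f-suffixed signature, and a forwarding call. A hedged sketch of how such a template expands, using a hand-built stand-in for the real data model in github.com/stretchr/testify/_codegen:

    package main

    import (
        "os"
        "text/template"
    )

    // These types are simplified stand-ins for the codegen's data model;
    // only the fields referenced by the template are included.
    type docInfo struct{ Name string }

    type method struct {
        CommentFormat         string
        DocInfo               docInfo
        ParamsFormat          string
        ForwardedParamsFormat string
    }

    const wrapperTmpl = `{{.CommentFormat}}
    func {{.DocInfo.Name}}f(t TestingT, {{.ParamsFormat}}) bool {
        return {{.DocInfo.Name}}(t, {{.ForwardedParamsFormat}})
    }
    `

    func main() {
        m := method{
            CommentFormat:         "// Truef asserts that the specified value is true.",
            DocInfo:               docInfo{Name: "True"},
            ParamsFormat:          "value bool, msg string, args ...interface{}",
            ForwardedParamsFormat: "value, append([]interface{}{msg}, args...)...",
        }
        template.Must(template.New("wrapper").Parse(wrapperTmpl)).Execute(os.Stdout, m)
    }
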
diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
index aa4311f..fcccbd0 100644
--- a/vendor/github.com/stretchr/testify/assert/assertion_forward.go
+++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go
@@ -16,18 +16,35 @@ func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool
return Condition(a.t, comp, msgAndArgs...)
}
+// Conditionf uses a Comparison to assert a complex condition.
+func (a *Assertions) Conditionf(comp Comparison, msg string, args ...interface{}) bool {
+ return Conditionf(a.t, comp, msg, args...)
+}
+
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
-// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'")
-// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
-// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
+// a.Contains("Hello World", "World")
+// a.Contains(["Hello", "World"], "World")
+// a.Contains({"Hello": "World"}, "Hello")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
return Contains(a.t, s, contains, msgAndArgs...)
}
+// Containsf asserts that the specified string, list(array, slice...) or map contains the
+// specified substring or element.
+//
+// a.Containsf("Hello World", "World", "error message %s", "formatted")
+// a.Containsf(["Hello", "World"], "World", "error message %s", "formatted")
+// a.Containsf({"Hello": "World"}, "Hello", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Containsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ return Containsf(a.t, s, contains, msg, args...)
+}
+
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
@@ -38,14 +55,25 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
return Empty(a.t, object, msgAndArgs...)
}
+// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// a.Emptyf(obj, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
+ return Emptyf(a.t, object, msg, args...)
+}
+
// Equal asserts that two objects are equal.
//
-// a.Equal(123, 123, "123 and 123 should be equal")
+// a.Equal(123, 123)
//
// Returns whether the assertion was successful (true) or not (false).
//
// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return Equal(a.t, expected, actual, msgAndArgs...)
}
@@ -54,28 +82,62 @@ func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
-// a.EqualError(err, expectedErrorString, "An error was expected")
+// a.EqualError(err, expectedErrorString)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool {
return EqualError(a.t, theError, errString, msgAndArgs...)
}
+// EqualErrorf asserts that a function returned an error (i.e. not `nil`)
+// and that it is equal to the provided error.
+//
+// actualObj, err := SomeFunction()
+// a.EqualErrorf(err, expectedErrorString, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualErrorf(theError error, errString string, msg string, args ...interface{}) bool {
+ return EqualErrorf(a.t, theError, errString, msg, args...)
+}
+
// EqualValues asserts that two objects are equal or convertable to the same types
// and equal.
//
-// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal")
+// a.EqualValues(uint32(123), int32(123))
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return EqualValues(a.t, expected, actual, msgAndArgs...)
}
+// EqualValuesf asserts that two objects are equal or convertible to the same types
+// and equal.
+//
+// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) EqualValuesf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return EqualValuesf(a.t, expected, actual, msg, args...)
+}
+
+// Equalf asserts that two objects are equal.
+//
+// a.Equalf(123, 123, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
+func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return Equalf(a.t, expected, actual, msg, args...)
+}
+
// Error asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
-// if a.Error(err, "An error was expected") {
-// assert.Equal(t, err, expectedError)
+// if a.Error(err) {
+// assert.Equal(t, expectedError, err)
// }
//
// Returns whether the assertion was successful (true) or not (false).
@@ -83,15 +145,36 @@ func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
return Error(a.t, err, msgAndArgs...)
}
+// Errorf asserts that a function returned an error (i.e. not `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.Errorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedErrorf, err)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
+ return Errorf(a.t, err, msg, args...)
+}
+
// Exactly asserts that two objects are equal is value and type.
//
-// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal")
+// a.Exactly(int32(123), int64(123))
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool {
return Exactly(a.t, expected, actual, msgAndArgs...)
}
+// Exactlyf asserts that two objects are equal in value and type.
+//
+// a.Exactlyf(int32(123), int64(123), "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Exactlyf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return Exactlyf(a.t, expected, actual, msg, args...)
+}
+
// Fail reports a failure through
func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool {
return Fail(a.t, failureMessage, msgAndArgs...)
@@ -102,15 +185,34 @@ func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) b
return FailNow(a.t, failureMessage, msgAndArgs...)
}
+// FailNowf fails the test immediately.
+func (a *Assertions) FailNowf(failureMessage string, msg string, args ...interface{}) bool {
+ return FailNowf(a.t, failureMessage, msg, args...)
+}
+
+// Failf reports a failure through the underlying TestingT.
+func (a *Assertions) Failf(failureMessage string, msg string, args ...interface{}) bool {
+ return Failf(a.t, failureMessage, msg, args...)
+}
+
// False asserts that the specified value is false.
//
-// a.False(myBool, "myBool should be false")
+// a.False(myBool)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool {
return False(a.t, value, msgAndArgs...)
}
+// Falsef asserts that the specified value is false.
+//
+// a.Falsef(myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Falsef(value bool, msg string, args ...interface{}) bool {
+ return Falsef(a.t, value, msg, args...)
+}
+
// HTTPBodyContains asserts that a specified handler returns a
// body that contains a string.
//
@@ -121,6 +223,16 @@ func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, u
return HTTPBodyContains(a.t, handler, method, url, values, str)
}
+// HTTPBodyContainsf asserts that a specified handler returns a
+// body that contains a string.
+//
+// a.HTTPBodyContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
+ return HTTPBodyContainsf(a.t, handler, method, url, values, str)
+}
+
// HTTPBodyNotContains asserts that a specified handler returns a
// body that does not contain a string.
//
@@ -131,6 +243,16 @@ func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string
return HTTPBodyNotContains(a.t, handler, method, url, values, str)
}
+// HTTPBodyNotContainsf asserts that a specified handler returns a
+// body that does not contain a string.
+//
+// a.HTTPBodyNotContainsf(myHandler, "www.google.com", nil, "I'm Feeling Lucky")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPBodyNotContainsf(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool {
+ return HTTPBodyNotContainsf(a.t, handler, method, url, values, str)
+}
+
// HTTPError asserts that a specified handler returns an error status code.
//
// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}}
@@ -140,6 +262,15 @@ func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url stri
return HTTPError(a.t, handler, method, url, values)
}
+// HTTPErrorf asserts that a specified handler returns an error status code.
+//
+// a.HTTPErrorf(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPErrorf(handler http.HandlerFunc, method string, url string, values url.Values) bool {
+ return HTTPErrorf(a.t, handler, method, url, values)
+}
+
// HTTPRedirect asserts that a specified handler returns a redirect status code.
//
// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}}
@@ -149,6 +280,15 @@ func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url s
return HTTPRedirect(a.t, handler, method, url, values)
}
+// HTTPRedirectf asserts that a specified handler returns a redirect status code.
+//
+// a.HTTPRedirectf(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}})
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPRedirectf(handler http.HandlerFunc, method string, url string, values url.Values) bool {
+ return HTTPRedirectf(a.t, handler, method, url, values)
+}
+
// HTTPSuccess asserts that a specified handler returns a success status code.
//
// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil)
@@ -158,13 +298,29 @@ func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url st
return HTTPSuccess(a.t, handler, method, url, values)
}
+// HTTPSuccessf asserts that a specified handler returns a success status code.
+//
+// a.HTTPSuccessf(myHandler, "POST", "http://www.google.com", nil)
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) HTTPSuccessf(handler http.HandlerFunc, method string, url string, values url.Values) bool {
+ return HTTPSuccessf(a.t, handler, method, url, values)
+}
+
// Implements asserts that an object is implemented by the specified interface.
//
-// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject")
+// a.Implements((*MyInterface)(nil), new(MyObject))
func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
return Implements(a.t, interfaceObject, object, msgAndArgs...)
}
+// Implementsf asserts that an object implements the specified interface.
+//
+// a.Implementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
+func (a *Assertions) Implementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
+ return Implementsf(a.t, interfaceObject, object, msg, args...)
+}
+
// InDelta asserts that the two numerals are within delta of each other.
//
// a.InDelta(math.Pi, (22 / 7.0), 0.01)
@@ -179,6 +335,20 @@ func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delt
return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...)
}
+// InDeltaSlicef is the same as InDelta, except it compares two slices.
+func (a *Assertions) InDeltaSlicef(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDeltaSlicef(a.t, expected, actual, delta, msg, args...)
+}
+
+// InDeltaf asserts that the two numerals are within delta of each other.
+//
+// a.InDeltaf(math.Pi, (22 / 7.0), 0.01, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InDeltaf(expected interface{}, actual interface{}, delta float64, msg string, args ...interface{}) bool {
+ return InDeltaf(a.t, expected, actual, delta, msg, args...)
+}
+
// InEpsilon asserts that expected and actual have a relative error less than epsilon
//
// Returns whether the assertion was successful (true) or not (false).
@@ -191,11 +361,28 @@ func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, ep
return InEpsilonSlice(a.t, expected, actual, epsilon, msgAndArgs...)
}
+// InEpsilonSlicef is the same as InEpsilon, except it compares each value from two slices.
+func (a *Assertions) InEpsilonSlicef(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ return InEpsilonSlicef(a.t, expected, actual, epsilon, msg, args...)
+}
+
+// InEpsilonf asserts that expected and actual have a relative error less than epsilon
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) InEpsilonf(expected interface{}, actual interface{}, epsilon float64, msg string, args ...interface{}) bool {
+ return InEpsilonf(a.t, expected, actual, epsilon, msg, args...)
+}
+
// IsType asserts that the specified objects are of the same type.
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
return IsType(a.t, expectedType, object, msgAndArgs...)
}
+// IsTypef asserts that the specified objects are of the same type.
+func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
+ return IsTypef(a.t, expectedType, object, msg, args...)
+}
+
// JSONEq asserts that two JSON strings are equivalent.
//
// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`)
@@ -205,30 +392,58 @@ func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interf
return JSONEq(a.t, expected, actual, msgAndArgs...)
}
+// JSONEqf asserts that two JSON strings are equivalent.
+//
+// a.JSONEqf(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) JSONEqf(expected string, actual string, msg string, args ...interface{}) bool {
+ return JSONEqf(a.t, expected, actual, msg, args...)
+}
+
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept.
//
-// a.Len(mySlice, 3, "The size of slice is not 3")
+// a.Len(mySlice, 3)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool {
return Len(a.t, object, length, msgAndArgs...)
}
+// Lenf asserts that the specified object has the expected length.
+// Lenf also fails if the object has a type that len() does not accept.
+//
+// a.Lenf(mySlice, 3, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Lenf(object interface{}, length int, msg string, args ...interface{}) bool {
+ return Lenf(a.t, object, length, msg, args...)
+}
+
// Nil asserts that the specified object is nil.
//
-// a.Nil(err, "err should be nothing")
+// a.Nil(err)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool {
return Nil(a.t, object, msgAndArgs...)
}
+// Nilf asserts that the specified object is nil.
+//
+// a.Nilf(err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Nilf(object interface{}, msg string, args ...interface{}) bool {
+ return Nilf(a.t, object, msg, args...)
+}
+
// NoError asserts that a function returned no error (i.e. `nil`).
//
// actualObj, err := SomeFunction()
// if a.NoError(err) {
-// assert.Equal(t, actualObj, expectedObj)
+// assert.Equal(t, expectedObj, actualObj)
// }
//
// Returns whether the assertion was successful (true) or not (false).
@@ -236,18 +451,42 @@ func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool {
return NoError(a.t, err, msgAndArgs...)
}
+// NoErrorf asserts that a function returned no error (i.e. `nil`).
+//
+// actualObj, err := SomeFunction()
+// if a.NoErrorf(err, "error message %s", "formatted") {
+// assert.Equal(t, expectedObj, actualObj)
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NoErrorf(err error, msg string, args ...interface{}) bool {
+ return NoErrorf(a.t, err, msg, args...)
+}
+
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
-// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
-// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
-// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
+// a.NotContains("Hello World", "Earth")
+// a.NotContains(["Hello", "World"], "Earth")
+// a.NotContains({"Hello": "World"}, "Earth")
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool {
return NotContains(a.t, s, contains, msgAndArgs...)
}
+// NotContainsf asserts that the specified string, list(array, slice...) or map does NOT contain the
+// specified substring or element.
+//
+// a.NotContainsf("Hello World", "Earth", "error message %s", "formatted")
+// a.NotContainsf(["Hello", "World"], "Earth", "error message %s", "formatted")
+// a.NotContainsf({"Hello": "World"}, "Earth", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotContainsf(s interface{}, contains interface{}, msg string, args ...interface{}) bool {
+ return NotContainsf(a.t, s, contains, msg, args...)
+}
+
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
// a slice or a channel with len == 0.
//
@@ -260,9 +499,21 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo
return NotEmpty(a.t, object, msgAndArgs...)
}
+// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either
+// a slice or a channel with len == 0.
+//
+// if a.NotEmptyf(obj, "error message %s", "formatted") {
+// assert.Equal(t, "two", obj[1])
+// }
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotEmptyf(object interface{}, msg string, args ...interface{}) bool {
+ return NotEmptyf(a.t, object, msg, args...)
+}
+
// NotEqual asserts that the specified values are NOT equal.
//
-// a.NotEqual(obj1, obj2, "two objects shouldn't be equal")
+// a.NotEqual(obj1, obj2)
//
// Returns whether the assertion was successful (true) or not (false).
//
@@ -272,26 +523,54 @@ func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndAr
return NotEqual(a.t, expected, actual, msgAndArgs...)
}
+// NotEqualf asserts that the specified values are NOT equal.
+//
+// a.NotEqualf(obj1, obj2, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+//
+// Pointer variable equality is determined based on the equality of the
+// referenced values (as opposed to the memory addresses).
+func (a *Assertions) NotEqualf(expected interface{}, actual interface{}, msg string, args ...interface{}) bool {
+ return NotEqualf(a.t, expected, actual, msg, args...)
+}
+
// NotNil asserts that the specified object is not nil.
//
-// a.NotNil(err, "err should be something")
+// a.NotNil(err)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool {
return NotNil(a.t, object, msgAndArgs...)
}
+// NotNilf asserts that the specified object is not nil.
+//
+// a.NotNilf(err, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotNilf(object interface{}, msg string, args ...interface{}) bool {
+ return NotNilf(a.t, object, msg, args...)
+}
+
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
//
-// a.NotPanics(func(){
-// RemainCalm()
-// }, "Calling RemainCalm() should NOT panic")
+// a.NotPanics(func(){ RemainCalm() })
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return NotPanics(a.t, f, msgAndArgs...)
}
+// NotPanicsf asserts that the code inside the specified PanicTestFunc does NOT panic.
+//
+// a.NotPanicsf(func(){ RemainCalm() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotPanicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+ return NotPanicsf(a.t, f, msg, args...)
+}
+
// NotRegexp asserts that a specified regexp does not match a string.
//
// a.NotRegexp(regexp.MustCompile("starts"), "it's starting")
@@ -302,22 +581,84 @@ func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...in
return NotRegexp(a.t, rx, str, msgAndArgs...)
}
+// NotRegexpf asserts that a specified regexp does not match a string.
+//
+// a.NotRegexpf(regexp.MustCompile("starts"), "it's starting", "error message %s", "formatted")
+// a.NotRegexpf("^start", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotRegexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ return NotRegexpf(a.t, rx, str, msg, args...)
+}
+
+// NotSubset asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+ return NotSubset(a.t, list, subset, msgAndArgs...)
+}
+
+// NotSubsetf asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ return NotSubsetf(a.t, list, subset, msg, args...)
+}
+
// NotZero asserts that i is not the zero value for its type and returns the truth.
func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool {
return NotZero(a.t, i, msgAndArgs...)
}
+// NotZerof asserts that i is not the zero value for its type and returns the truth.
+func (a *Assertions) NotZerof(i interface{}, msg string, args ...interface{}) bool {
+ return NotZerof(a.t, i, msg, args...)
+}
+
// Panics asserts that the code inside the specified PanicTestFunc panics.
//
-// a.Panics(func(){
-// GoCrazy()
-// }, "Calling GoCrazy() should panic")
+// a.Panics(func(){ GoCrazy() })
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool {
return Panics(a.t, f, msgAndArgs...)
}
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValue("crazy error", func(){ GoCrazy() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) PanicsWithValue(expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+ return PanicsWithValue(a.t, expected, f, msgAndArgs...)
+}
+
+// PanicsWithValuef asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// a.PanicsWithValuef("crazy error", func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) PanicsWithValuef(expected interface{}, f PanicTestFunc, msg string, args ...interface{}) bool {
+ return PanicsWithValuef(a.t, expected, f, msg, args...)
+}
+
+// Panicsf asserts that the code inside the specified PanicTestFunc panics.
+//
+// a.Panicsf(func(){ GoCrazy() }, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Panicsf(f PanicTestFunc, msg string, args ...interface{}) bool {
+ return Panicsf(a.t, f, msg, args...)
+}
+
// Regexp asserts that a specified regexp matches a string.
//
// a.Regexp(regexp.MustCompile("start"), "it's starting")
@@ -328,25 +669,78 @@ func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...inter
return Regexp(a.t, rx, str, msgAndArgs...)
}
+// Regexpf asserts that a specified regexp matches a string.
+//
+// a.Regexpf(regexp.MustCompile("start"), "it's starting", "error message %s", "formatted")
+// a.Regexpf("start...$", "it's not starting", "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Regexpf(rx interface{}, str interface{}, msg string, args ...interface{}) bool {
+ return Regexpf(a.t, rx, str, msg, args...)
+}
+
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
+ return Subset(a.t, list, subset, msgAndArgs...)
+}
+
+// Subsetf asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
+ return Subsetf(a.t, list, subset, msg, args...)
+}
+
// True asserts that the specified value is true.
//
-// a.True(myBool, "myBool should be true")
+// a.True(myBool)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool {
return True(a.t, value, msgAndArgs...)
}
+// Truef asserts that the specified value is true.
+//
+// a.Truef(myBool, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) Truef(value bool, msg string, args ...interface{}) bool {
+ return Truef(a.t, value, msg, args...)
+}
+
// WithinDuration asserts that the two times are within duration delta of each other.
//
-// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
+// a.WithinDuration(time.Now(), time.Now(), 10*time.Second)
//
// Returns whether the assertion was successful (true) or not (false).
func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
return WithinDuration(a.t, expected, actual, delta, msgAndArgs...)
}
+// WithinDurationf asserts that the two times are within duration delta of each other.
+//
+// a.WithinDurationf(time.Now(), time.Now(), 10*time.Second, "error message %s", "formatted")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func (a *Assertions) WithinDurationf(expected time.Time, actual time.Time, delta time.Duration, msg string, args ...interface{}) bool {
+ return WithinDurationf(a.t, expected, actual, delta, msg, args...)
+}
+
// Zero asserts that i is the zero value for its type and returns the truth.
func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool {
return Zero(a.t, i, msgAndArgs...)
}
+
+// Zerof asserts that i is the zero value for its type and returns the truth.
+func (a *Assertions) Zerof(i interface{}, msg string, args ...interface{}) bool {
+ return Zerof(a.t, i, msg, args...)
+}
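The receiver-based wrappers above let a test bind its TestingT once via assert.New (defined elsewhere in the package) and then call both the plain and the f-suffixed variants without repeating t. A brief usage sketch:

    package example

    import (
        "testing"

        "github.com/stretchr/testify/assert"
    )

    func TestForwardedAssertions(t *testing.T) {
        a := assert.New(t) // every call below forwards to the package-level assertion

        a.Subset([]int{1, 2, 3}, []int{1, 2})
        a.NotSubsetf([]int{1, 3, 4}, []int{1, 2}, "unexpected subset in %s", "fixture")
        a.PanicsWithValue("boom", func() { panic("boom") })
    }
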
diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go
index d1552e5..8259050 100644
--- a/vendor/github.com/stretchr/testify/assert/assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/assertions.go
@@ -4,6 +4,7 @@ import (
"bufio"
"bytes"
"encoding/json"
+ "errors"
"fmt"
"math"
"reflect"
@@ -18,6 +19,8 @@ import (
"github.com/pmezard/go-difflib/difflib"
)
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_format.go.tmpl
+
// TestingT is an interface wrapper around *testing.T
type TestingT interface {
Errorf(format string, args ...interface{})
@@ -38,7 +41,15 @@ func ObjectsAreEqual(expected, actual interface{}) bool {
if expected == nil || actual == nil {
return expected == actual
}
-
+ if exp, ok := expected.([]byte); ok {
+ act, ok := actual.([]byte)
+ if !ok {
+ return false
+ } else if exp == nil || act == nil {
+ return exp == nil && act == nil
+ }
+ return bytes.Equal(exp, act)
+ }
return reflect.DeepEqual(expected, actual)
}
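The new fast path above compares []byte arguments with bytes.Equal rather than reflect.DeepEqual, and the explicit nil checks preserve DeepEqual's distinction between a nil slice and an empty one, which bytes.Equal alone would erase. A small sketch of that difference:

    package main

    import (
        "bytes"
        "fmt"
        "reflect"
    )

    func main() {
        a := []byte("payload")
        b := []byte("payload")

        fmt.Println(bytes.Equal(a, b))       // true, without the generic deep walk
        fmt.Println(reflect.DeepEqual(a, b)) // true

        // nil vs. empty: DeepEqual says false, bytes.Equal says true,
        // hence the explicit nil handling in ObjectsAreEqual.
        fmt.Println(reflect.DeepEqual([]byte(nil), []byte{})) // false
        fmt.Println(bytes.Equal([]byte(nil), []byte{}))       // true
    }
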
@@ -108,10 +119,12 @@ func CallerInfo() []string {
}
parts := strings.Split(file, "/")
- dir := parts[len(parts)-2]
file = parts[len(parts)-1]
- if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
- callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ if len(parts) > 1 {
+ dir := parts[len(parts)-2]
+ if (dir != "assert" && dir != "mock" && dir != "require") || file == "mock_test.go" {
+ callers = append(callers, fmt.Sprintf("%s:%d", file, line))
+ }
}
// Drop the package
@@ -181,7 +194,7 @@ func indentMessageLines(message string, longestLabelLen int) string {
// no need to align first line because it starts at the correct location (after the label)
if i != 0 {
// append alignLen+1 spaces to align with "{{longestLabel}}:" before adding tab
- outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen +1) + "\t")
+ outBuf.WriteString("\n\r\t" + strings.Repeat(" ", longestLabelLen+1) + "\t")
}
outBuf.WriteString(scanner.Text())
}
@@ -223,13 +236,13 @@ func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool {
content = append(content, labeledContent{"Messages", message})
}
- t.Errorf("\r" + getWhitespaceString() + labeledOutput(content...))
+ t.Errorf("%s", "\r"+getWhitespaceString()+labeledOutput(content...))
return false
}
type labeledContent struct {
- label string
+ label string
content string
}
@@ -258,7 +271,7 @@ func labeledOutput(content ...labeledContent) string {
// Implements asserts that an object is implemented by the specified interface.
//
-// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject")
+// assert.Implements(t, (*MyInterface)(nil), new(MyObject))
func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
interfaceType := reflect.TypeOf(interfaceObject).Elem()
@@ -283,20 +296,25 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs
// Equal asserts that two objects are equal.
//
-// assert.Equal(t, 123, 123, "123 and 123 should be equal")
+// assert.Equal(t, 123, 123)
//
// Returns whether the assertion was successful (true) or not (false).
//
// Pointer variable equality is determined based on the equality of the
-// referenced values (as opposed to the memory addresses).
+// referenced values (as opposed to the memory addresses). Function equality
+// cannot be determined and will always fail.
func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if err := validateEqualArgs(expected, actual); err != nil {
+ return Fail(t, fmt.Sprintf("Invalid operation: %#v == %#v (%s)",
+ expected, actual, err), msgAndArgs...)
+ }
if !ObjectsAreEqual(expected, actual) {
diff := diff(expected, actual)
expected, actual = formatUnequalValues(expected, actual)
return Fail(t, fmt.Sprintf("Not equal: \n"+
"expected: %s\n"+
- "received: %s%s", expected, actual, diff), msgAndArgs...)
+ "actual: %s%s", expected, actual, diff), msgAndArgs...)
}
return true
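validateEqualArgs itself is not shown in this hunk, but per the updated doc comment, values whose equality cannot be determined (notably funcs) now fail fast with an "Invalid operation" message instead of falling through to ObjectsAreEqual. A hedged sketch using a tiny recorder that satisfies TestingT:

    package main

    import (
        "fmt"

        "github.com/stretchr/testify/assert"
    )

    // recorder satisfies assert.TestingT (only Errorf is required) so the
    // failure can be observed without running a real test.
    type recorder struct{ msgs []string }

    func (r *recorder) Errorf(format string, args ...interface{}) {
        r.msgs = append(r.msgs, fmt.Sprintf(format, args...))
    }

    func main() {
        r := &recorder{}
        f := func() {}
        ok := assert.Equal(r, f, f)      // function values cannot be compared
        fmt.Println(ok, len(r.msgs) > 0) // false true
    }
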
@@ -322,7 +340,7 @@ func formatUnequalValues(expected, actual interface{}) (e string, a string) {
// EqualValues asserts that two objects are equal or convertable to the same types
// and equal.
//
-// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal")
+// assert.EqualValues(t, uint32(123), int32(123))
//
// Returns whether the assertion was successful (true) or not (false).
func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
@@ -332,7 +350,7 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa
expected, actual = formatUnequalValues(expected, actual)
return Fail(t, fmt.Sprintf("Not equal: \n"+
"expected: %s\n"+
- "received: %s%s", expected, actual, diff), msgAndArgs...)
+ "actual: %s%s", expected, actual, diff), msgAndArgs...)
}
return true
@@ -341,7 +359,7 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa
// Exactly asserts that two objects are equal is value and type.
//
-// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal")
+// assert.Exactly(t, int32(123), int64(123))
//
// Returns whether the assertion was successful (true) or not (false).
func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
@@ -359,7 +377,7 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
// NotNil asserts that the specified object is not nil.
//
-// assert.NotNil(t, err, "err should be something")
+// assert.NotNil(t, err)
//
// Returns whether the assertion was successful (true) or not (false).
func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
@@ -386,7 +404,7 @@ func isNil(object interface{}) bool {
// Nil asserts that the specified object is nil.
//
-// assert.Nil(t, err, "err should be nothing")
+// assert.Nil(t, err)
//
// Returns whether the assertion was successful (true) or not (false).
func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
@@ -431,9 +449,7 @@ func isEmpty(object interface{}) bool {
objValue := reflect.ValueOf(object)
switch objValue.Kind() {
- case reflect.Map:
- fallthrough
- case reflect.Slice, reflect.Chan:
+ case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
{
return (objValue.Len() == 0)
}
@@ -509,7 +525,7 @@ func getLen(x interface{}) (ok bool, length int) {
// Len asserts that the specified object has specific length.
// Len also fails if the object has a type that len() not accept.
//
-// assert.Len(t, mySlice, 3, "The size of slice is not 3")
+// assert.Len(t, mySlice, 3)
//
// Returns whether the assertion was successful (true) or not (false).
func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool {
@@ -526,7 +542,7 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
// True asserts that the specified value is true.
//
-// assert.True(t, myBool, "myBool should be true")
+// assert.True(t, myBool)
//
// Returns whether the assertion was successful (true) or not (false).
func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
@@ -541,7 +557,7 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
// False asserts that the specified value is false.
//
-// assert.False(t, myBool, "myBool should be false")
+// assert.False(t, myBool)
//
// Returns whether the assertion was successful (true) or not (false).
func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
@@ -556,13 +572,17 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
// NotEqual asserts that the specified values are NOT equal.
//
-// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal")
+// assert.NotEqual(t, obj1, obj2)
//
// Returns whether the assertion was successful (true) or not (false).
//
// Pointer variable equality is determined based on the equality of the
// referenced values (as opposed to the memory addresses).
func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool {
+ if err := validateEqualArgs(expected, actual); err != nil {
+ return Fail(t, fmt.Sprintf("Invalid operation: %#v != %#v (%s)",
+ expected, actual, err), msgAndArgs...)
+ }
if ObjectsAreEqual(expected, actual) {
return Fail(t, fmt.Sprintf("Should not be: %#v\n", actual), msgAndArgs...)
@@ -613,9 +633,9 @@ func includeElement(list interface{}, element interface{}) (ok, found bool) {
// Contains asserts that the specified string, list(array, slice...) or map contains the
// specified substring or element.
//
-// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'")
-// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'")
-// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'")
+// assert.Contains(t, "Hello World", "World")
+// assert.Contains(t, ["Hello", "World"], "World")
+// assert.Contains(t, {"Hello": "World"}, "Hello")
//
// Returns whether the assertion was successful (true) or not (false).
func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
@@ -635,9 +655,9 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
// specified substring or element.
//
-// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'")
-// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'")
-// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'")
+// assert.NotContains(t, "Hello World", "Earth")
+// assert.NotContains(t, ["Hello", "World"], "Earth")
+// assert.NotContains(t, {"Hello": "World"}, "Earth")
//
// Returns whether the assertion was successful (true) or not (false).
func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bool {
@@ -654,6 +674,92 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
}
+// Subset asserts that the specified list(array, slice...) contains all
+// elements given in the specified subset(array, slice...).
+//
+// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if subset == nil {
+ return true // we consider nil to be equal to the nil set
+ }
+
+ subsetValue := reflect.ValueOf(subset)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+
+ listKind := reflect.TypeOf(list).Kind()
+ subsetKind := reflect.TypeOf(subset).Kind()
+
+ if listKind != reflect.Array && listKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+ }
+
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+ }
+
+ for i := 0; i < subsetValue.Len(); i++ {
+ element := subsetValue.Index(i).Interface()
+ ok, found := includeElement(list, element)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ }
+ if !found {
+ return Fail(t, fmt.Sprintf("\"%s\" does not contain \"%s\"", list, element), msgAndArgs...)
+ }
+ }
+
+ return true
+}
+
+// NotSubset asserts that the specified list(array, slice...) does NOT contain all
+// elements given in the specified subset(array, slice...).
+//
+// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]")
+//
+// Returns whether the assertion was successful (true) or not (false).
+func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
+ if subset == nil {
+ return false // we consider nil to be equal to the nil set
+ }
+
+ subsetValue := reflect.ValueOf(subset)
+ defer func() {
+ if e := recover(); e != nil {
+ ok = false
+ }
+ }()
+
+ listKind := reflect.TypeOf(list).Kind()
+ subsetKind := reflect.TypeOf(subset).Kind()
+
+ if listKind != reflect.Array && listKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", list, listKind), msgAndArgs...)
+ }
+
+ if subsetKind != reflect.Array && subsetKind != reflect.Slice {
+ return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
+ }
+
+ for i := 0; i < subsetValue.Len(); i++ {
+ element := subsetValue.Index(i).Interface()
+ ok, found := includeElement(list, element)
+ if !ok {
+ return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
+ }
+ if !found {
+ return true
+ }
+ }
+
+ return Fail(t, fmt.Sprintf("%q is a subset of %q", subset, list), msgAndArgs...)
+}
+
// Condition uses a Comparison to assert a complex condition.
func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
result := comp()
@@ -691,9 +797,7 @@ func didPanic(f PanicTestFunc) (bool, interface{}) {
// Panics asserts that the code inside the specified PanicTestFunc panics.
//
-// assert.Panics(t, func(){
-// GoCrazy()
-// }, "Calling GoCrazy() should panic")
+// assert.Panics(t, func(){ GoCrazy() })
//
// Returns whether the assertion was successful (true) or not (false).
func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
@@ -705,11 +809,28 @@ func Panics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
return true
}
+// PanicsWithValue asserts that the code inside the specified PanicTestFunc panics, and that
+// the recovered panic value equals the expected panic value.
+//
+// assert.PanicsWithValue(t, "crazy error", func(){ GoCrazy() })
+//
+// Returns whether the assertion was successful (true) or not (false).
+func PanicsWithValue(t TestingT, expected interface{}, f PanicTestFunc, msgAndArgs ...interface{}) bool {
+
+ funcDidPanic, panicValue := didPanic(f)
+ if !funcDidPanic {
+ return Fail(t, fmt.Sprintf("func %#v should panic\n\r\tPanic value:\t%v", f, panicValue), msgAndArgs...)
+ }
+ if panicValue != expected {
+ return Fail(t, fmt.Sprintf("func %#v should panic with value:\t%v\n\r\tPanic value:\t%v", f, expected, panicValue), msgAndArgs...)
+ }
+
+ return true
+}
+
// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic.
//
-// assert.NotPanics(t, func(){
-// RemainCalm()
-// }, "Calling RemainCalm() should NOT panic")
+// assert.NotPanics(t, func(){ RemainCalm() })
//
// Returns whether the assertion was successful (true) or not (false).
func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
@@ -723,7 +844,7 @@ func NotPanics(t TestingT, f PanicTestFunc, msgAndArgs ...interface{}) bool {
// WithinDuration asserts that the two times are within duration delta of each other.
//
-// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s")
+// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second)
//
// Returns whether the assertion was successful (true) or not (false).
func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool {
@@ -763,6 +884,8 @@ func toFloat(x interface{}) (float64, bool) {
xf = float64(xn)
case float64:
xf = float64(xn)
+ case time.Duration:
+ xf = float64(xn)
default:
xok = false
}
@@ -785,7 +908,7 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs
}
if math.IsNaN(af) {
- return Fail(t, fmt.Sprintf("Actual must not be NaN"), msgAndArgs...)
+ return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...)
}
if math.IsNaN(bf) {
@@ -812,7 +935,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn
expectedSlice := reflect.ValueOf(expected)
for i := 0; i < actualSlice.Len(); i++ {
- result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta)
+ result := InDelta(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta, msgAndArgs...)
if !result {
return result
}
@@ -831,7 +954,7 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
}
bf, bok := toFloat(actual)
if !bok {
- return 0, fmt.Errorf("expected value %q cannot be converted to float", actual)
+ return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
}
return math.Abs(af-bf) / math.Abs(af), nil
@@ -847,7 +970,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
}
if actualEpsilon > epsilon {
return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+
- " < %#v (actual)", actualEpsilon, epsilon), msgAndArgs...)
+ " < %#v (actual)", epsilon, actualEpsilon), msgAndArgs...)
}
return true
@@ -882,7 +1005,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
//
// actualObj, err := SomeFunction()
// if assert.NoError(t, err) {
-// assert.Equal(t, actualObj, expectedObj)
+// assert.Equal(t, expectedObj, actualObj)
// }
//
// Returns whether the assertion was successful (true) or not (false).
@@ -897,8 +1020,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
// Error asserts that a function returned an error (i.e. not `nil`).
//
// actualObj, err := SomeFunction()
-// if assert.Error(t, err, "An error was expected") {
-// assert.Equal(t, err, expectedError)
+// if assert.Error(t, err) {
+// assert.Equal(t, expectedError, err)
// }
//
// Returns whether the assertion was successful (true) or not (false).
@@ -915,7 +1038,7 @@ func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
// and that it is equal to the provided error.
//
// actualObj, err := SomeFunction()
-// assert.EqualError(t, err, expectedErrorString, "An error was expected")
+// assert.EqualError(t, err, expectedErrorString)
//
// Returns whether the assertion was successful (true) or not (false).
func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) bool {
@@ -928,7 +1051,7 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte
if expected != actual {
return Fail(t, fmt.Sprintf("Error message not equal:\n"+
"expected: %q\n"+
- "received: %q", expected, actual), msgAndArgs...)
+ "actual: %q", expected, actual), msgAndArgs...)
}
return true
}
@@ -1061,6 +1184,22 @@ func diff(expected interface{}, actual interface{}) string {
return "\n\nDiff:\n" + diff
}
+// validateEqualArgs checks whether provided arguments can be safely used in the
+// Equal/NotEqual functions.
+func validateEqualArgs(expected, actual interface{}) error {
+ if isFunction(expected) || isFunction(actual) {
+ return errors.New("cannot take func type as argument")
+ }
+ return nil
+}
+
+func isFunction(arg interface{}) bool {
+ if arg == nil {
+ return false
+ }
+ return reflect.TypeOf(arg).Kind() == reflect.Func
+}
+
var spewConfig = spew.ConfigState{
Indent: " ",
DisablePointerAddresses: true,
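For reference, a minimal sketch (not part of the patch) of how the assertions added above are typically used: Subset, NotSubset, PanicsWithValue, and the new func-argument guard in NotEqual. The package and test names are illustrative.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestNewAssertions(t *testing.T) {
	// Subset/NotSubset check element membership, ignoring order.
	assert.Subset(t, []int{1, 2, 3}, []int{1, 2})
	assert.NotSubset(t, []int{1, 3, 4}, []int{1, 2})

	// PanicsWithValue checks both that the function panics and what it panics with.
	assert.PanicsWithValue(t, "crazy error", func() { panic("crazy error") })

	// NotEqual now rejects func arguments via validateEqualArgs instead of
	// comparing them; plain values still behave as before.
	assert.NotEqual(t, 1, 2)
}
```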
diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
index b867e95..9ad5685 100644
--- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go
@@ -13,4 +13,4 @@ func New(t TestingT) *Assertions {
}
}
-//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl
+//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl -include-format-funcs
diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go
index fa7ab89..ba811c0 100644
--- a/vendor/github.com/stretchr/testify/assert/http_assertions.go
+++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go
@@ -8,16 +8,16 @@ import (
"strings"
)
-// httpCode is a helper that returns HTTP code of the response. It returns -1
-// if building a new request fails.
-func httpCode(handler http.HandlerFunc, method, url string, values url.Values) int {
+// httpCode is a helper that returns HTTP code of the response. It returns -1 and
+// an error if building a new request fails.
+func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
w := httptest.NewRecorder()
req, err := http.NewRequest(method, url+"?"+values.Encode(), nil)
if err != nil {
- return -1
+ return -1, err
}
handler(w, req)
- return w.Code
+ return w.Code, nil
}
// HTTPSuccess asserts that a specified handler returns a success status code.
@@ -26,11 +26,18 @@ func httpCode(handler http.HandlerFunc, method, url string, values url.Values) i
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
- code := httpCode(handler, method, url, values)
- if code == -1 {
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
return false
}
- return code >= http.StatusOK && code <= http.StatusPartialContent
+
+ isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
+ if !isSuccessCode {
+ Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isSuccessCode
}
// HTTPRedirect asserts that a specified handler returns a redirect status code.
@@ -39,11 +46,18 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
- code := httpCode(handler, method, url, values)
- if code == -1 {
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
return false
}
- return code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+
+ isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
+ if !isRedirectCode {
+ Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isRedirectCode
}
// HTTPError asserts that a specified handler returns an error status code.
@@ -52,11 +66,18 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu
//
// Returns whether the assertion was successful (true) or not (false).
func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values url.Values) bool {
- code := httpCode(handler, method, url, values)
- if code == -1 {
+ code, err := httpCode(handler, method, url, values)
+ if err != nil {
+ Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err))
return false
}
- return code >= http.StatusBadRequest
+
+ isErrorCode := code >= http.StatusBadRequest
+ if !isErrorCode {
+ Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code))
+ }
+
+ return isErrorCode
}
// HTTPBody is a helper that returns HTTP body of the response. It returns
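The http_assertions changes above turn request-building failures into explicit test failures instead of a silent -1 return. A minimal sketch of the call sites, with the handlers and URL values chosen purely for illustration:

```go
package example

import (
	"net/http"
	"net/url"
	"testing"

	"github.com/stretchr/testify/assert"
)

func okHandler(w http.ResponseWriter, r *http.Request) {
	w.WriteHeader(http.StatusOK)
}

func TestHTTPHelpers(t *testing.T) {
	// A 2xx handler satisfies HTTPSuccess; the query values are encoded into the URL.
	assert.HTTPSuccess(t, okHandler, "GET", "/greet", url.Values{"name": {"world"}})

	// http.NotFound writes a 404, which HTTPError accepts as an error status.
	assert.HTTPError(t, http.NotFound, "GET", "/missing", nil)
}
```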
diff --git a/vendor/github.com/xanzy/go-gitlab/CHANGELOG.md b/vendor/github.com/xanzy/go-gitlab/CHANGELOG.md
index 09cb6db..29e93ff 100644
--- a/vendor/github.com/xanzy/go-gitlab/CHANGELOG.md
+++ b/vendor/github.com/xanzy/go-gitlab/CHANGELOG.md
@@ -1,6 +1,11 @@
go-gitlab CHANGELOG
===================
+0.6.0
+-----
+- Add support for the V4 Gitlab API. This means the older V3 API is no longer fully supported
+ with this version. If you still need that version, please use the `f-api-v3` branch.
+
0.4.0
-----
- Add support to use [`sudo`](https://docs.gitlab.com/ce/api/README.html#sudo) for all API calls.
diff --git a/vendor/github.com/xanzy/go-gitlab/README.md b/vendor/github.com/xanzy/go-gitlab/README.md
index 7b6d1bc..5c3e29b 100644
--- a/vendor/github.com/xanzy/go-gitlab/README.md
+++ b/vendor/github.com/xanzy/go-gitlab/README.md
@@ -7,8 +7,9 @@ A GitLab API client enabling Go programs to interact with GitLab in a simple and
## NOTE
-Release v0.5.0 (released on 22-03-2017) no longer supports Go versions older
-then 1.7.x If you want (or need) to use an older Go version please use v0.4.1
+Release v0.6.0 (released on 25-08-2017) no longer supports the older V3 Gitlab API. If
+you need V3 support, please use the `f-api-v3` branch. This release contains some backwards
+incompatible changes that were needed to fully support the V4 Gitlab API.
## Coverage
@@ -35,6 +36,8 @@ includes all calls to the following services:
- [x] Namespaces
- [x] Settings
- [x] Pipelines
+- [x] Version
+- [x] Wikis
## Usage
@@ -57,7 +60,7 @@ to list all projects for user "svanharmelen":
```go
git := gitlab.NewClient(nil)
-opt := &ListProjectsOptions{Search: gitlab.String("svanharmelen")})
+opt := &ListProjectsOptions{Search: gitlab.String("svanharmelen")}
projects, _, err := git.Projects.ListProjects(opt)
```
@@ -84,7 +87,7 @@ func main() {
Description: gitlab.String("Just a test project to play with"),
MergeRequestsEnabled: gitlab.Bool(true),
SnippetsEnabled: gitlab.Bool(true),
- VisibilityLevel: gitlab.VisibilityLevel(gitlab.PublicVisibility),
+ Visibility: gitlab.VisibilityLevel(gitlab.PublicVisibility),
}
project, _, err := git.Projects.CreateProject(p)
if err != nil {
@@ -96,7 +99,7 @@ func main() {
Title: gitlab.String("Dummy Snippet"),
FileName: gitlab.String("snippet.go"),
Code: gitlab.String("package main...."),
- VisibilityLevel: gitlab.VisibilityLevel(gitlab.PublicVisibility),
+ Visibility: gitlab.VisibilityLevel(gitlab.PublicVisibility),
}
_, _, err = git.ProjectSnippets.CreateSnippet(project.ID, s)
if err != nil {
diff --git a/vendor/github.com/xanzy/go-gitlab/branches.go b/vendor/github.com/xanzy/go-gitlab/branches.go
index 838245e..a792e3f 100644
--- a/vendor/github.com/xanzy/go-gitlab/branches.go
+++ b/vendor/github.com/xanzy/go-gitlab/branches.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -24,16 +24,14 @@ import (
// BranchesService handles communication with the branch related methods
// of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/branches.html
type BranchesService struct {
client *Client
}
// Branch represents a GitLab branch.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/branches.html
type Branch struct {
Commit *Commit `json:"commit"`
Name string `json:"name"`
@@ -50,7 +48,7 @@ func (b Branch) String() string {
// ListBranchesOptions represents the available ListBranches() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#list-repository-branches
+// https://docs.gitlab.com/ce/api/branches.html#list-repository-branches
type ListBranchesOptions struct {
ListOptions
}
@@ -59,7 +57,7 @@ type ListBranchesOptions struct {
// name alphabetically.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#list-repository-branches
+// https://docs.gitlab.com/ce/api/branches.html#list-repository-branches
func (s *BranchesService) ListBranches(pid interface{}, opts *ListBranchesOptions, options ...OptionFunc) ([]*Branch, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -84,7 +82,7 @@ func (s *BranchesService) ListBranches(pid interface{}, opts *ListBranchesOption
// GetBranch gets a single project repository branch.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#get-single-repository-branch
+// https://docs.gitlab.com/ce/api/branches.html#get-single-repository-branch
func (s *BranchesService) GetBranch(pid interface{}, branch string, options ...OptionFunc) (*Branch, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -109,7 +107,7 @@ func (s *BranchesService) GetBranch(pid interface{}, branch string, options ...O
// ProtectBranchOptions represents the available ProtectBranch() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#protect-repository-branch
+// https://docs.gitlab.com/ce/api/branches.html#protect-repository-branch
type ProtectBranchOptions struct {
DevelopersCanPush *bool `url:"developers_can_push,omitempty" json:"developers_can_push,omitempty"`
DevelopersCanMerge *bool `url:"developers_can_merge,omitempty" json:"developers_can_merge,omitempty"`
@@ -120,7 +118,7 @@ type ProtectBranchOptions struct {
// still returns a 200 OK status code.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#protect-repository-branch
+// https://docs.gitlab.com/ce/api/branches.html#protect-repository-branch
func (s *BranchesService) ProtectBranch(pid interface{}, branch string, opts *ProtectBranchOptions, options ...OptionFunc) (*Branch, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -147,7 +145,7 @@ func (s *BranchesService) ProtectBranch(pid interface{}, branch string, opts *Pr
// still returns a 200 OK status code.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#unprotect-repository-branch
+// https://docs.gitlab.com/ce/api/branches.html#unprotect-repository-branch
func (s *BranchesService) UnprotectBranch(pid interface{}, branch string, options ...OptionFunc) (*Branch, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -172,16 +170,16 @@ func (s *BranchesService) UnprotectBranch(pid interface{}, branch string, option
// CreateBranchOptions represents the available CreateBranch() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#create-repository-branch
+// https://docs.gitlab.com/ce/api/branches.html#create-repository-branch
type CreateBranchOptions struct {
- BranchName *string `url:"branch_name,omitempty" json:"branch_name,omitempty"`
- Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
+ Branch *string `url:"branch,omitempty" json:"branch,omitempty"`
+ Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
}
// CreateBranch creates branch from commit SHA or existing branch.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#create-repository-branch
+// https://docs.gitlab.com/ce/api/branches.html#create-repository-branch
func (s *BranchesService) CreateBranch(pid interface{}, opt *CreateBranchOptions, options ...OptionFunc) (*Branch, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -206,7 +204,7 @@ func (s *BranchesService) CreateBranch(pid interface{}, opt *CreateBranchOptions
// DeleteBranch deletes an existing branch.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#delete-repository-branch
+// https://docs.gitlab.com/ce/api/branches.html#delete-repository-branch
func (s *BranchesService) DeleteBranch(pid interface{}, branch string, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -225,7 +223,7 @@ func (s *BranchesService) DeleteBranch(pid interface{}, branch string, options .
// DeleteMergedBranches deletes all branches that are merged into the project's default branch.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#delete-merged-branches
+// https://docs.gitlab.com/ce/api/branches.html#delete-merged-branches
func (s *BranchesService) DeleteMergedBranches(pid interface{}, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
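A minimal sketch of branch creation against the V4 endpoints, using the renamed Branch field of CreateBranchOptions (formerly BranchName). The client setup is assumed to happen elsewhere and the project path is a placeholder:

```go
package example

import gitlab "github.com/xanzy/go-gitlab"

// createFeatureBranch creates a branch off master with the renamed
// CreateBranchOptions.Branch field. "group/project" is a placeholder.
func createFeatureBranch(git *gitlab.Client) (*gitlab.Branch, error) {
	opt := &gitlab.CreateBranchOptions{
		Branch: gitlab.String("feature/docs"),
		Ref:    gitlab.String("master"),
	}
	branch, _, err := git.Branches.CreateBranch("group/project", opt)
	return branch, err
}
```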
diff --git a/vendor/github.com/xanzy/go-gitlab/build_variables.go b/vendor/github.com/xanzy/go-gitlab/build_variables.go
index a966825..465042d 100644
--- a/vendor/github.com/xanzy/go-gitlab/build_variables.go
+++ b/vendor/github.com/xanzy/go-gitlab/build_variables.go
@@ -8,17 +8,18 @@ import (
// BuildVariablesService handles communication with the project variables related methods
// of the Gitlab API
//
-// Gitlab API Docs : https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md
+// Gitlab API Docs : https://docs.gitlab.com/ce/api/build_variables.html
type BuildVariablesService struct {
client *Client
}
// BuildVariable represents a variable available for each build of the given project
//
-// Gitlab API Docs : https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md
+// Gitlab API Docs : https://docs.gitlab.com/ce/api/build_variables.html
type BuildVariable struct {
- Key string `json:"key"`
- Value string `json:"value"`
+ Key string `json:"key"`
+ Value string `json:"value"`
+ Protected bool `json:"protected"`
}
func (v BuildVariable) String() string {
@@ -28,7 +29,7 @@ func (v BuildVariable) String() string {
// ListBuildVariablesOptions are the parameters to ListBuildVariables()
//
// Gitlab API Docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md#list-project-variables
+// https://docs.gitlab.com/ce/api/build_variables.html#list-project-variables
type ListBuildVariablesOptions struct {
ListOptions
}
@@ -36,7 +37,7 @@ type ListBuildVariablesOptions struct {
// ListBuildVariables gets the a list of project variables in a project
//
// Gitlab API Docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md#list-project-variables
+// https://docs.gitlab.com/ce/api/build_variables.html#list-project-variables
func (s *BuildVariablesService) ListBuildVariables(pid interface{}, opts *ListBuildVariablesOptions, options ...OptionFunc) ([]*BuildVariable, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -61,7 +62,7 @@ func (s *BuildVariablesService) ListBuildVariables(pid interface{}, opts *ListBu
// GetBuildVariable gets a single project variable of a project
//
// Gitlab API Docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md#show-variable-details
+// https://docs.gitlab.com/ce/api/build_variables.html#show-variable-details
func (s *BuildVariablesService) GetBuildVariable(pid interface{}, key string, options ...OptionFunc) (*BuildVariable, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -83,18 +84,28 @@ func (s *BuildVariablesService) GetBuildVariable(pid interface{}, key string, op
return v, resp, err
}
+// CreateBuildVariableOptions are the parameters to CreateBuildVariable()
+//
+// Gitlab API Docs:
+// https://docs.gitlab.com/ce/api/build_variables.html#create-variable
+type CreateBuildVariableOptions struct {
+ Key *string `url:"key" json:"key"`
+ Value *string `url:"value" json:"value"`
+ Protected *bool `url:"protected,omitempty" json:"protected,omitempty"`
+}
+
// CreateBuildVariable creates a variable for a given project
//
// Gitlab API Docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md#create-variable
-func (s *BuildVariablesService) CreateBuildVariable(pid interface{}, key, value string, options ...OptionFunc) (*BuildVariable, *Response, error) {
+// https://docs.gitlab.com/ce/api/build_variables.html#create-variable
+func (s *BuildVariablesService) CreateBuildVariable(pid interface{}, opt *CreateBuildVariableOptions, options ...OptionFunc) (*BuildVariable, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/variables", url.QueryEscape(project))
- req, err := s.client.NewRequest("POST", u, BuildVariable{key, value}, options)
+ req, err := s.client.NewRequest("POST", u, opt, options)
if err != nil {
return nil, nil, err
}
@@ -108,19 +119,29 @@ func (s *BuildVariablesService) CreateBuildVariable(pid interface{}, key, value
return v, resp, err
}
+// UpdateBuildVariableOptions are the parameters to UpdateBuildVariable()
+//
+// Gitlab API Docs:
+// https://docs.gitlab.com/ce/api/build_variables.html#update-variable
+type UpdateBuildVariableOptions struct {
+ Key *string `url:"key" json:"key"`
+ Value *string `url:"value" json:"value"`
+ Protected *bool `url:"protected,omitempty" json:"protected,omitempty"`
+}
+
// UpdateBuildVariable updates an existing project variable
// The variable key must exist
//
// Gitlab API Docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md#update-variable
-func (s *BuildVariablesService) UpdateBuildVariable(pid interface{}, key, value string, options ...OptionFunc) (*BuildVariable, *Response, error) {
+// https://docs.gitlab.com/ce/api/build_variables.html#update-variable
+func (s *BuildVariablesService) UpdateBuildVariable(pid interface{}, key string, opt *UpdateBuildVariableOptions, options ...OptionFunc) (*BuildVariable, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/variables/%s", url.QueryEscape(project), key)
- req, err := s.client.NewRequest("PUT", u, BuildVariable{key, value}, options)
+ req, err := s.client.NewRequest("PUT", u, opt, options)
if err != nil {
return nil, nil, err
}
@@ -137,7 +158,7 @@ func (s *BuildVariablesService) UpdateBuildVariable(pid interface{}, key, value
// RemoveBuildVariable removes a project variable of a given project identified by its key
//
// Gitlab API Docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md#remove-variable
+// https://docs.gitlab.com/ce/api/build_variables.html#remove-variable
func (s *BuildVariablesService) RemoveBuildVariable(pid interface{}, key string, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
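A minimal sketch of the new options-struct style of CreateBuildVariable, including the added Protected flag. It assumes the service is exposed as BuildVariables on an initialized client; the key, value, and project path are placeholders:

```go
package example

import gitlab "github.com/xanzy/go-gitlab"

// addProtectedVariable uses the new CreateBuildVariableOptions struct instead
// of the old (key, value) arguments, and sets the added Protected flag.
func addProtectedVariable(git *gitlab.Client) (*gitlab.BuildVariable, error) {
	opt := &gitlab.CreateBuildVariableOptions{
		Key:       gitlab.String("DEPLOY_TOKEN"),
		Value:     gitlab.String("s3cr3t"),
		Protected: gitlab.Bool(true),
	}
	v, _, err := git.BuildVariables.CreateBuildVariable("group/project", opt)
	return v, err
}
```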
diff --git a/vendor/github.com/xanzy/go-gitlab/builds.go b/vendor/github.com/xanzy/go-gitlab/builds.go
deleted file mode 100644
index 9396729..0000000
--- a/vendor/github.com/xanzy/go-gitlab/builds.go
+++ /dev/null
@@ -1,351 +0,0 @@
-//
-// Copyright 2016, Arkbriar
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-
-package gitlab
-
-import (
- "bytes"
- "fmt"
- "io"
- "net/url"
- "time"
-)
-
-// ListBuildsOptions are options for two list apis
-type ListBuildsOptions struct {
- ListOptions
- Scope []BuildState `url:"scope,omitempty" json:"scope,omitempty"`
-}
-
-// BuildsService handles communication with the ci builds related methods
-// of the GitLab API.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md
-type BuildsService struct {
- client *Client
-}
-
-// Build represents a ci build.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md
-type Build struct {
- Commit *Commit `json:"commit"`
- CreatedAt *time.Time `json:"created_at"`
- ArtifactsFile struct {
- Filename string `json:"filename"`
- Size int `json:"size"`
- } `json:"artifacts_file"`
- FinishedAt *time.Time `json:"finished_at"`
- ID int `json:"id"`
- Name string `json:"name"`
- Ref string `json:"ref"`
- Runner struct {
- ID int `json:"id"`
- Description string `json:"description"`
- Active bool `json:"active"`
- IsShared bool `json:"is_shared"`
- Name string `json:"name"`
- } `json:"runner"`
- Stage string `json:"stage"`
- StartedAt *time.Time `json:"started_at"`
- Status string `json:"status"`
- Tag bool `json:"tag"`
- User *User `json:"user"`
-}
-
-// ListProjectBuilds gets a list of builds in a project.
-//
-// The scope of builds to show, one or array of: pending, running,
-// failed, success, canceled; showing all builds if none provided.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#list-project-builds
-func (s *BuildsService) ListProjectBuilds(pid interface{}, opts *ListBuildsOptions, options ...OptionFunc) ([]Build, *Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, nil, err
- }
- u := fmt.Sprintf("projects/%s/builds", url.QueryEscape(project))
-
- req, err := s.client.NewRequest("GET", u, opts, options)
- if err != nil {
- return nil, nil, err
- }
-
- var builds []Build
- resp, err := s.client.Do(req, &builds)
- if err != nil {
- return nil, resp, err
- }
-
- return builds, resp, err
-}
-
-// ListCommitBuilds gets a list of builds for specific commit in a
-// project. If the commit SHA is not found, it will respond with 404.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#list-commit-builds
-func (s *BuildsService) ListCommitBuilds(pid interface{}, sha string, opts *ListBuildsOptions, options ...OptionFunc) ([]Build, *Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, nil, err
- }
- u := fmt.Sprintf("projects/%s/repository/commits/%s/builds", project, sha)
-
- req, err := s.client.NewRequest("GET", u, opts, options)
- if err != nil {
- return nil, nil, err
- }
-
- var builds []Build
- resp, err := s.client.Do(req, &builds)
- if err != nil {
- return nil, resp, err
- }
-
- return builds, resp, err
-}
-
-// GetBuild gets a single build of a project.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#get-a-single-build
-func (s *BuildsService) GetBuild(pid interface{}, buildID int, options ...OptionFunc) (*Build, *Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, nil, err
- }
- u := fmt.Sprintf("projects/%s/builds/%d", project, buildID)
-
- req, err := s.client.NewRequest("GET", u, nil, options)
- if err != nil {
- return nil, nil, err
- }
-
- build := new(Build)
- resp, err := s.client.Do(req, build)
- if err != nil {
- return nil, resp, err
- }
-
- return build, resp, err
-}
-
-// GetBuildArtifacts get builds artifacts of a project
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#get-build-artifacts
-func (s *BuildsService) GetBuildArtifacts(pid interface{}, buildID int, options ...OptionFunc) (io.Reader, *Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, nil, err
- }
- u := fmt.Sprintf("projects/%s/builds/%d/artifacts", project, buildID)
-
- req, err := s.client.NewRequest("GET", u, nil, options)
- if err != nil {
- return nil, nil, err
- }
-
- artifactsBuf := new(bytes.Buffer)
- resp, err := s.client.Do(req, artifactsBuf)
- if err != nil {
- return nil, resp, err
- }
-
- return artifactsBuf, resp, err
-}
-
-// DownloadArtifactsFile download the artifacts file from the given
-// reference name and job provided the build finished successfully.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#download-the-artifacts-file
-func (s *BuildsService) DownloadArtifactsFile(pid interface{}, refName string, job string, options ...OptionFunc) (io.Reader, *Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, nil, err
- }
- u := fmt.Sprintf("projects/%s/builds/artifacts/%s/download?job=%s", project, refName, job)
-
- req, err := s.client.NewRequest("GET", u, nil, options)
- if err != nil {
- return nil, nil, err
- }
-
- artifactsBuf := new(bytes.Buffer)
- resp, err := s.client.Do(req, artifactsBuf)
- if err != nil {
- return nil, resp, err
- }
-
- return artifactsBuf, resp, err
-}
-
-// GetTraceFile gets a trace of a specific build of a project
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#get-a-trace-file
-func (s *BuildsService) GetTraceFile(pid interface{}, buildID int, options ...OptionFunc) (io.Reader, *Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, nil, err
- }
- u := fmt.Sprintf("projects/%s/builds/%d/trace", project, buildID)
-
- req, err := s.client.NewRequest("GET", u, nil, options)
- if err != nil {
- return nil, nil, err
- }
-
- traceBuf := new(bytes.Buffer)
- resp, err := s.client.Do(req, traceBuf)
- if err != nil {
- return nil, resp, err
- }
-
- return traceBuf, resp, err
-}
-
-// CancelBuild cancels a single build of a project.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#cancel-a-build
-func (s *BuildsService) CancelBuild(pid interface{}, buildID int, options ...OptionFunc) (*Build, *Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, nil, err
- }
- u := fmt.Sprintf("projects/%s/builds/%d/cancel", project, buildID)
-
- req, err := s.client.NewRequest("POST", u, nil, options)
- if err != nil {
- return nil, nil, err
- }
-
- build := new(Build)
- resp, err := s.client.Do(req, build)
- if err != nil {
- return nil, resp, err
- }
-
- return build, resp, err
-}
-
-// RetryBuild retries a single build of a project
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#retry-a-build
-func (s *BuildsService) RetryBuild(pid interface{}, buildID int, options ...OptionFunc) (*Build, *Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, nil, err
- }
- u := fmt.Sprintf("projects/%s/builds/%d/retry", project, buildID)
-
- req, err := s.client.NewRequest("POST", u, nil, options)
- if err != nil {
- return nil, nil, err
- }
-
- build := new(Build)
- resp, err := s.client.Do(req, build)
- if err != nil {
- return nil, resp, err
- }
-
- return build, resp, err
-}
-
-// EraseBuild erases a single build of a project, removes a build
-// artifacts and a build trace.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#erase-a-build
-func (s *BuildsService) EraseBuild(pid interface{}, buildID int, options ...OptionFunc) (*Build, *Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, nil, err
- }
- u := fmt.Sprintf("projects/%s/builds/%d/erase", project, buildID)
-
- req, err := s.client.NewRequest("POST", u, nil, options)
- if err != nil {
- return nil, nil, err
- }
-
- build := new(Build)
- resp, err := s.client.Do(req, build)
- if err != nil {
- return nil, resp, err
- }
-
- return build, resp, err
-}
-
-// KeepArtifacts prevents artifacts from being deleted when
-// expiration is set.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#keep-artifacts
-func (s *BuildsService) KeepArtifacts(pid interface{}, buildID int, options ...OptionFunc) (*Build, *Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, nil, err
- }
- u := fmt.Sprintf("projects/%s/builds/%d/artifacts/keep", project, buildID)
-
- req, err := s.client.NewRequest("POST", u, nil, options)
- if err != nil {
- return nil, nil, err
- }
-
- build := new(Build)
- resp, err := s.client.Do(req, build)
- if err != nil {
- return nil, resp, err
- }
-
- return build, resp, err
-}
-
-// PlayBuild triggers a manual action to start a build.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#play-a-build
-func (s *BuildsService) PlayBuild(pid interface{}, buildID int, options ...OptionFunc) (*Build, *Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, nil, err
- }
- u := fmt.Sprintf("projects/%s/builds/%d/play", project, buildID)
-
- req, err := s.client.NewRequest("POST", u, nil, options)
- if err != nil {
- return nil, nil, err
- }
-
- build := new(Build)
- resp, err := s.client.Do(req, build)
- if err != nil {
- return nil, resp, err
- }
-
- return build, resp, err
-}
diff --git a/vendor/github.com/xanzy/go-gitlab/commits.go b/vendor/github.com/xanzy/go-gitlab/commits.go
index e2e7de8..3f4db78 100644
--- a/vendor/github.com/xanzy/go-gitlab/commits.go
+++ b/vendor/github.com/xanzy/go-gitlab/commits.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -25,16 +25,14 @@ import (
// CommitsService handles communication with the commit related methods
// of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html
type CommitsService struct {
client *Client
}
// Commit represents a GitLab commit.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html
type Commit struct {
ID string `json:"id"`
ShortID string `json:"short_id"`
@@ -54,8 +52,7 @@ type Commit struct {
// CommitStats represents the number of added and deleted files in a commit.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html
type CommitStats struct {
Additions int `json:"additions"`
Deletions int `json:"deletions"`
@@ -68,8 +65,7 @@ func (c Commit) String() string {
// ListCommitsOptions represents the available ListCommits() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#list-repository-commits
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#list-repository-commits
type ListCommitsOptions struct {
ListOptions
RefName *string `url:"ref_name,omitempty" json:"ref_name,omitempty"`
@@ -79,8 +75,7 @@ type ListCommitsOptions struct {
// ListCommits gets a list of repository commits in a project.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#list-commits
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#list-commits
func (s *CommitsService) ListCommits(pid interface{}, opt *ListCommitsOptions, options ...OptionFunc) ([]*Commit, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -104,8 +99,7 @@ func (s *CommitsService) ListCommits(pid interface{}, opt *ListCommitsOptions, o
// FileAction represents the available actions that can be performed on a file.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#create-a-commit-with-multiple-files-and-actions
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#create-a-commit-with-multiple-files-and-actions
type FileAction string
// The available file actions.
@@ -118,43 +112,31 @@ const (
// CommitAction represents a single file action within a commit.
type CommitAction struct {
- Action FileAction `url:"action" json:"action,omitempty"`
- FilePath string `url:"file_path" json:"file_path,omitempty"`
+ Action FileAction `url:"action" json:"action"`
+ FilePath string `url:"file_path" json:"file_path"`
PreviousPath string `url:"previous_path,omitempty" json:"previous_path,omitempty"`
Content string `url:"content,omitempty" json:"content,omitempty"`
Encoding string `url:"encoding,omitempty" json:"encoding,omitempty"`
}
-// CreateCommitOptions represents the available options for a new commit.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#create-a-commit-with-multiple-files-and-actions
-type CreateCommitOptions struct {
- BranchName *string `url:"branch_name" json:"branch_name,omitempty"`
- CommitMessage *string `url:"commit_message" json:"commit_message,omitempty"`
- Actions []*CommitAction `url:"actions" json:"actions,omitempty"`
- AuthorEmail *string `url:"author_email,omitempty" json:"author_email,omitempty"`
- AuthorName *string `url:"author_name,omitempty" json:"author_name,omitempty"`
-}
-
-// CreateCommit creates a commit with multiple files and actions.
+// GetCommit gets a specific commit identified by the commit hash or name of a
+// branch or tag.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#create-a-commit-with-multiple-files-and-actions
-func (s *CommitsService) CreateCommit(pid interface{}, opt *CreateCommitOptions, options ...OptionFunc) (*Commit, *Response, error) {
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#get-a-single-commit
+func (s *CommitsService) GetCommit(pid interface{}, sha string, options ...OptionFunc) (*Commit, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/repository/commits", url.QueryEscape(project))
+ u := fmt.Sprintf("projects/%s/repository/commits/%s", url.QueryEscape(project), sha)
- req, err := s.client.NewRequest("POST", u, opt, options)
+ req, err := s.client.NewRequest("GET", u, nil, options)
if err != nil {
return nil, nil, err
}
- var c *Commit
- resp, err := s.client.Do(req, &c)
+ c := new(Commit)
+ resp, err := s.client.Do(req, c)
if err != nil {
return nil, resp, err
}
@@ -162,25 +144,34 @@ func (s *CommitsService) CreateCommit(pid interface{}, opt *CreateCommitOptions,
return c, resp, err
}
-// GetCommit gets a specific commit identified by the commit hash or name of a
-// branch or tag.
+// CreateCommitOptions represents the available options for a new commit.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#get-a-single-commit
-func (s *CommitsService) GetCommit(pid interface{}, sha string, options ...OptionFunc) (*Commit, *Response, error) {
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#create-a-commit-with-multiple-files-and-actions
+type CreateCommitOptions struct {
+ Branch *string `url:"branch" json:"branch"`
+ CommitMessage *string `url:"commit_message" json:"commit_message"`
+ Actions []*CommitAction `url:"actions" json:"actions"`
+ AuthorEmail *string `url:"author_email,omitempty" json:"author_email,omitempty"`
+ AuthorName *string `url:"author_name,omitempty" json:"author_name,omitempty"`
+}
+
+// CreateCommit creates a commit with multiple files and actions.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#create-a-commit-with-multiple-files-and-actions
+func (s *CommitsService) CreateCommit(pid interface{}, opt *CreateCommitOptions, options ...OptionFunc) (*Commit, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/repository/commits/%s", url.QueryEscape(project), sha)
+ u := fmt.Sprintf("projects/%s/repository/commits", url.QueryEscape(project))
- req, err := s.client.NewRequest("GET", u, nil, options)
+ req, err := s.client.NewRequest("POST", u, opt, options)
if err != nil {
return nil, nil, err
}
- c := new(Commit)
- resp, err := s.client.Do(req, c)
+ var c *Commit
+ resp, err := s.client.Do(req, &c)
if err != nil {
return nil, resp, err
}
@@ -190,8 +181,7 @@ func (s *CommitsService) GetCommit(pid interface{}, sha string, options ...Optio
// Diff represents a GitLab diff.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html
type Diff struct {
Diff string `json:"diff"`
NewPath string `json:"new_path"`
@@ -210,7 +200,7 @@ func (d Diff) String() string {
// GetCommitDiff gets the diff of a commit in a project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#get-the-diff-of-a-commit
+// https://docs.gitlab.com/ce/api/commits.html#get-the-diff-of-a-commit
func (s *CommitsService) GetCommitDiff(pid interface{}, sha string, options ...OptionFunc) ([]*Diff, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -234,8 +224,7 @@ func (s *CommitsService) GetCommitDiff(pid interface{}, sha string, options ...O
// CommitComment represents a GitLab commit comment.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html
type CommitComment struct {
Note string `json:"note"`
Path string `json:"path"`
@@ -262,7 +251,7 @@ func (c CommitComment) String() string {
// GetCommitComments gets the comments of a commit in a project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#get-the-comments-of-a-commit
+// https://docs.gitlab.com/ce/api/commits.html#get-the-comments-of-a-commit
func (s *CommitsService) GetCommitComments(pid interface{}, sha string, options ...OptionFunc) ([]*CommitComment, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -288,7 +277,7 @@ func (s *CommitsService) GetCommitComments(pid interface{}, sha string, options
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#post-comment-to-commit
+// https://docs.gitlab.com/ce/api/commits.html#post-comment-to-commit
type PostCommitCommentOptions struct {
Note *string `url:"note,omitempty" json:"note,omitempty"`
Path *string `url:"path" json:"path"`
@@ -301,7 +290,7 @@ type PostCommitCommentOptions struct {
// line_old are required.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#post-comment-to-commit
+// https://docs.gitlab.com/ce/api/commits.html#post-comment-to-commit
func (s *CommitsService) PostCommitComment(pid interface{}, sha string, opt *PostCommitCommentOptions, options ...OptionFunc) (*CommitComment, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -325,8 +314,7 @@ func (s *CommitsService) PostCommitComment(pid interface{}, sha string, opt *Pos
// GetCommitStatusesOptions represents the available GetCommitStatuses() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#get-the-status-of-a-commit
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#get-the-status-of-a-commit
type GetCommitStatusesOptions struct {
Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
Stage *string `url:"stage,omitempty" json:"stage,omitempty"`
@@ -336,8 +324,7 @@ type GetCommitStatusesOptions struct {
// CommitStatus represents a GitLab commit status.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#get-the-status-of-a-commit
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#get-the-status-of-a-commit
type CommitStatus struct {
ID int `json:"id"`
SHA string `json:"sha"`
@@ -354,8 +341,7 @@ type CommitStatus struct {
// GetCommitStatuses gets the statuses of a commit in a project.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#get-the-status-of-a-commit
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#get-the-status-of-a-commit
func (s *CommitsService) GetCommitStatuses(pid interface{}, sha string, opt *GetCommitStatusesOptions, options ...OptionFunc) ([]*CommitStatus, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -379,8 +365,7 @@ func (s *CommitsService) GetCommitStatuses(pid interface{}, sha string, opt *Get
// SetCommitStatusOptions represents the available SetCommitStatus() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#post-the-status-to-commit
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#post-the-status-to-commit
type SetCommitStatusOptions struct {
State BuildState `url:"state" json:"state"`
Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
@@ -404,8 +389,7 @@ const (
// SetCommitStatus sets the status of a commit in a project.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#post-the-status-to-commit
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#post-the-status-to-commit
func (s *CommitsService) SetCommitStatus(pid interface{}, sha string, opt *SetCommitStatusOptions, options ...OptionFunc) (*CommitStatus, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -429,16 +413,14 @@ func (s *CommitsService) SetCommitStatus(pid interface{}, sha string, opt *SetCo
// CherryPickCommitOptions represents the available options for cherry-picking a commit.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#cherry-pick-a-commit
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#cherry-pick-a-commit
type CherryPickCommitOptions struct {
TargetBranch *string `url:"branch" json:"branch,omitempty"`
}
// CherryPickCommit cherry picks a commit to a given branch.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#cherry-pick-a-commit
+// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#cherry-pick-a-commit
func (s *CommitsService) CherryPickCommit(pid interface{}, sha string, opt *CherryPickCommitOptions, options ...OptionFunc) (*Commit, *Response, error) {
project, err := parseID(pid)
if err != nil {
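A minimal sketch of CreateCommit after the BranchName to Branch rename. It assumes the FileCreate action constant and an initialized client; the project path and file content are placeholders:

```go
package example

import gitlab "github.com/xanzy/go-gitlab"

// commitReadme creates a single commit with one file action, using the
// renamed Branch field of CreateCommitOptions.
func commitReadme(git *gitlab.Client) (*gitlab.Commit, error) {
	opt := &gitlab.CreateCommitOptions{
		Branch:        gitlab.String("master"),
		CommitMessage: gitlab.String("Add README"),
		Actions: []*gitlab.CommitAction{
			{
				Action:   gitlab.FileCreate, // assumed create-action constant
				FilePath: "README.md",
				Content:  "# Hello\n",
			},
		},
	}
	commit, _, err := git.Commits.CreateCommit("group/project", opt)
	return commit, err
}
```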
diff --git a/vendor/github.com/xanzy/go-gitlab/deploy_keys.go b/vendor/github.com/xanzy/go-gitlab/deploy_keys.go
index 00bcf0d..248ce3f 100644
--- a/vendor/github.com/xanzy/go-gitlab/deploy_keys.go
+++ b/vendor/github.com/xanzy/go-gitlab/deploy_keys.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -25,8 +25,7 @@ import (
// DeployKeysService handles communication with the keys related methods
// of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/deploy_keys.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/deploy_keys.html
type DeployKeysService struct {
client *Client
}
@@ -44,41 +43,60 @@ func (k DeployKey) String() string {
return Stringify(k)
}
-// ListDeployKeys gets a list of a project's deploy keys
+// ListAllDeployKeys gets a list of all deploy keys
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/deploy_keys.md#list-deploy-keys
-func (s *DeployKeysService) ListDeployKeys(pid interface{}, options ...OptionFunc) ([]*DeployKey, *Response, error) {
+// https://docs.gitlab.com/ce/api/deploy_keys.html#list-all-deploy-keys
+func (s *DeployKeysService) ListAllDeployKeys(options ...OptionFunc) ([]*DeployKey, *Response, error) {
+ req, err := s.client.NewRequest("GET", "deploy_keys", nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var ks []*DeployKey
+ resp, err := s.client.Do(req, &ks)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return ks, resp, err
+}
+
+// ListProjectDeployKeys gets a list of a project's deploy keys
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/deploy_keys.html#list-project-deploy-keys
+func (s *DeployKeysService) ListProjectDeployKeys(pid interface{}, options ...OptionFunc) ([]*DeployKey, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/keys", url.QueryEscape(project))
+ u := fmt.Sprintf("projects/%s/deploy_keys", url.QueryEscape(project))
req, err := s.client.NewRequest("GET", u, nil, options)
if err != nil {
return nil, nil, err
}
- var k []*DeployKey
- resp, err := s.client.Do(req, &k)
+ var ks []*DeployKey
+ resp, err := s.client.Do(req, &ks)
if err != nil {
return nil, resp, err
}
- return k, resp, err
+ return ks, resp, err
}
// GetDeployKey gets a single deploy key.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/deploy_keys.md#single-deploy-key
+// https://docs.gitlab.com/ce/api/deploy_keys.html#single-deploy-key
func (s *DeployKeysService) GetDeployKey(pid interface{}, deployKey int, options ...OptionFunc) (*DeployKey, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/keys/%d", url.QueryEscape(project), deployKey)
+ u := fmt.Sprintf("projects/%s/deploy_keys/%d", url.QueryEscape(project), deployKey)
req, err := s.client.NewRequest("GET", u, nil, options)
if err != nil {
@@ -97,7 +115,7 @@ func (s *DeployKeysService) GetDeployKey(pid interface{}, deployKey int, options
// AddDeployKeyOptions represents the available AddDeployKey() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/deploy_keys.md#add-deploy-key
+// https://docs.gitlab.com/ce/api/deploy_keys.html#add-deploy-key
type AddDeployKeyOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Key *string `url:"key,omitempty" json:"key,omitempty"`
@@ -109,13 +127,13 @@ type AddDeployKeyOptions struct {
// original one is accessible by the same user.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/deploy_keys.md#add-deploy-key
+// https://docs.gitlab.com/ce/api/deploy_keys.html#add-deploy-key
func (s *DeployKeysService) AddDeployKey(pid interface{}, opt *AddDeployKeyOptions, options ...OptionFunc) (*DeployKey, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/keys", url.QueryEscape(project))
+ u := fmt.Sprintf("projects/%s/deploy_keys", url.QueryEscape(project))
req, err := s.client.NewRequest("POST", u, opt, options)
if err != nil {
@@ -134,13 +152,13 @@ func (s *DeployKeysService) AddDeployKey(pid interface{}, opt *AddDeployKeyOptio
// DeleteDeployKey deletes a deploy key from a project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/deploy_keys.md#delete-deploy-key
+// https://docs.gitlab.com/ce/api/deploy_keys.html#delete-deploy-key
func (s *DeployKeysService) DeleteDeployKey(pid interface{}, deployKey int, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, err
}
- u := fmt.Sprintf("projects/%s/keys/%d", url.QueryEscape(project), deployKey)
+ u := fmt.Sprintf("projects/%s/deploy_keys/%d", url.QueryEscape(project), deployKey)
req, err := s.client.NewRequest("DELETE", u, nil, options)
if err != nil {
@@ -149,3 +167,28 @@ func (s *DeployKeysService) DeleteDeployKey(pid interface{}, deployKey int, opti
return s.client.Do(req, nil)
}
+
+// EnableDeployKey enables a deploy key.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/deploy_keys.html#enable-deploy-key
+func (s *DeployKeysService) EnableDeployKey(pid interface{}, deployKey int, options ...OptionFunc) (*DeployKey, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/deploy_keys/%d/enable", url.QueryEscape(project), deployKey)
+
+ req, err := s.client.NewRequest("POST", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ k := new(DeployKey)
+ resp, err := s.client.Do(req, k)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return k, resp, err
+}
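A minimal usage sketch for the renamed deploy-key calls above. It assumes a *gitlab.Client built with the package's NewClient constructor (not shown in this diff); the token, project path and key ID are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/xanzy/go-gitlab"
)

func main() {
	// NewClient is assumed from the same package; the token is a placeholder.
	git := gitlab.NewClient(nil, "private-token")

	// List every deploy key visible to the token via the new GET /deploy_keys call.
	keys, _, err := git.DeployKeys.ListAllDeployKeys()
	if err != nil {
		log.Fatal(err)
	}
	for _, k := range keys {
		fmt.Println(k) // DeployKey prints via its Stringify-based String() method
	}

	// Enable an already existing key (ID 42 is a placeholder) on a project.
	key, _, err := git.DeployKeys.EnableDeployKey("group/project", 42)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(key)
}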
diff --git a/vendor/github.com/xanzy/go-gitlab/events.go b/vendor/github.com/xanzy/go-gitlab/events.go
index 7ba02df..9f63fb0 100644
--- a/vendor/github.com/xanzy/go-gitlab/events.go
+++ b/vendor/github.com/xanzy/go-gitlab/events.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@ import "time"
// PushEvent represents a push event.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#push-events
+// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#push-events
type PushEvent struct {
ObjectKind string `json:"object_kind"`
Before string `json:"before"`
@@ -34,20 +34,20 @@ type PushEvent struct {
UserAvatar string `json:"user_avatar"`
ProjectID int `json:"project_id"`
Project struct {
- Name string `json:"name"`
- Description string `json:"description"`
- AvatarURL string `json:"avatar_url"`
- GitSSHURL string `json:"git_ssh_url"`
- GitHTTPURL string `json:"git_http_url"`
- Namespace string `json:"namespace"`
- PathWithNamespace string `json:"path_with_namespace"`
- DefaultBranch string `json:"default_branch"`
- Homepage string `json:"homepage"`
- URL string `json:"url"`
- SSHURL string `json:"ssh_url"`
- HTTPURL string `json:"http_url"`
- WebURL string `json:"web_url"`
- VisibilityLevel VisibilityLevelValue `json:"visibility_level"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ AvatarURL string `json:"avatar_url"`
+ GitSSHURL string `json:"git_ssh_url"`
+ GitHTTPURL string `json:"git_http_url"`
+ Namespace string `json:"namespace"`
+ PathWithNamespace string `json:"path_with_namespace"`
+ DefaultBranch string `json:"default_branch"`
+ Homepage string `json:"homepage"`
+ URL string `json:"url"`
+ SSHURL string `json:"ssh_url"`
+ HTTPURL string `json:"http_url"`
+ WebURL string `json:"web_url"`
+ Visibility VisibilityValue `json:"visibility"`
} `json:"project"`
Repository *Repository `json:"repository"`
Commits []*Commit `json:"commits"`
@@ -57,7 +57,7 @@ type PushEvent struct {
// TagEvent represents a tag event.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#tag-events
+// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#tag-events
type TagEvent struct {
ObjectKind string `json:"object_kind"`
Before string `json:"before"`
@@ -69,20 +69,20 @@ type TagEvent struct {
UserAvatar string `json:"user_avatar"`
ProjectID int `json:"project_id"`
Project struct {
- Name string `json:"name"`
- Description string `json:"description"`
- AvatarURL string `json:"avatar_url"`
- GitSSHURL string `json:"git_ssh_url"`
- GitHTTPURL string `json:"git_http_url"`
- Namespace string `json:"namespace"`
- PathWithNamespace string `json:"path_with_namespace"`
- DefaultBranch string `json:"default_branch"`
- Homepage string `json:"homepage"`
- URL string `json:"url"`
- SSHURL string `json:"ssh_url"`
- HTTPURL string `json:"http_url"`
- WebURL string `json:"web_url"`
- VisibilityLevel VisibilityLevelValue `json:"visibility_level"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ AvatarURL string `json:"avatar_url"`
+ GitSSHURL string `json:"git_ssh_url"`
+ GitHTTPURL string `json:"git_http_url"`
+ Namespace string `json:"namespace"`
+ PathWithNamespace string `json:"path_with_namespace"`
+ DefaultBranch string `json:"default_branch"`
+ Homepage string `json:"homepage"`
+ URL string `json:"url"`
+ SSHURL string `json:"ssh_url"`
+ HTTPURL string `json:"http_url"`
+ WebURL string `json:"web_url"`
+ Visibility VisibilityValue `json:"visibility"`
} `json:"project"`
Repository *Repository `json:"repository"`
Commits []*Commit `json:"commits"`
@@ -92,25 +92,25 @@ type TagEvent struct {
// IssueEvent represents an issue event.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#issues-events
+// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#issues-events
type IssueEvent struct {
ObjectKind string `json:"object_kind"`
User *User `json:"user"`
Project struct {
- Name string `json:"name"`
- Description string `json:"description"`
- AvatarURL string `json:"avatar_url"`
- GitSSHURL string `json:"git_ssh_url"`
- GitHTTPURL string `json:"git_http_url"`
- Namespace string `json:"namespace"`
- PathWithNamespace string `json:"path_with_namespace"`
- DefaultBranch string `json:"default_branch"`
- Homepage string `json:"homepage"`
- URL string `json:"url"`
- SSHURL string `json:"ssh_url"`
- HTTPURL string `json:"http_url"`
- WebURL string `json:"web_url"`
- VisibilityLevel VisibilityLevelValue `json:"visibility_level"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ AvatarURL string `json:"avatar_url"`
+ GitSSHURL string `json:"git_ssh_url"`
+ GitHTTPURL string `json:"git_http_url"`
+ Namespace string `json:"namespace"`
+ PathWithNamespace string `json:"path_with_namespace"`
+ DefaultBranch string `json:"default_branch"`
+ Homepage string `json:"homepage"`
+ URL string `json:"url"`
+ SSHURL string `json:"ssh_url"`
+ HTTPURL string `json:"http_url"`
+ WebURL string `json:"web_url"`
+ Visibility VisibilityValue `json:"visibility"`
} `json:"project"`
Repository *Repository `json:"repository"`
ObjectAttributes struct {
@@ -140,26 +140,26 @@ type IssueEvent struct {
// CommitCommentEvent represents a comment on a commit event.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#comment-on-commit
+// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#comment-on-commit
type CommitCommentEvent struct {
ObjectKind string `json:"object_kind"`
User *User `json:"user"`
ProjectID int `json:"project_id"`
Project struct {
- Name string `json:"name"`
- Description string `json:"description"`
- AvatarURL string `json:"avatar_url"`
- GitSSHURL string `json:"git_ssh_url"`
- GitHTTPURL string `json:"git_http_url"`
- Namespace string `json:"namespace"`
- PathWithNamespace string `json:"path_with_namespace"`
- DefaultBranch string `json:"default_branch"`
- Homepage string `json:"homepage"`
- URL string `json:"url"`
- SSHURL string `json:"ssh_url"`
- HTTPURL string `json:"http_url"`
- WebURL string `json:"web_url"`
- VisibilityLevel VisibilityLevelValue `json:"visibility_level"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ AvatarURL string `json:"avatar_url"`
+ GitSSHURL string `json:"git_ssh_url"`
+ GitHTTPURL string `json:"git_http_url"`
+ Namespace string `json:"namespace"`
+ PathWithNamespace string `json:"path_with_namespace"`
+ DefaultBranch string `json:"default_branch"`
+ Homepage string `json:"homepage"`
+ URL string `json:"url"`
+ SSHURL string `json:"ssh_url"`
+ HTTPURL string `json:"http_url"`
+ WebURL string `json:"web_url"`
+ Visibility VisibilityValue `json:"visibility"`
} `json:"project"`
Repository *Repository `json:"repository"`
ObjectAttributes struct {
@@ -192,26 +192,26 @@ type CommitCommentEvent struct {
// MergeCommentEvent represents a comment on a merge event.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#comment-on-merge-request
+// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#comment-on-merge-request
type MergeCommentEvent struct {
ObjectKind string `json:"object_kind"`
User *User `json:"user"`
ProjectID int `json:"project_id"`
Project struct {
- Name string `json:"name"`
- Description string `json:"description"`
- AvatarURL string `json:"avatar_url"`
- GitSSHURL string `json:"git_ssh_url"`
- GitHTTPURL string `json:"git_http_url"`
- Namespace string `json:"namespace"`
- PathWithNamespace string `json:"path_with_namespace"`
- DefaultBranch string `json:"default_branch"`
- Homepage string `json:"homepage"`
- URL string `json:"url"`
- SSHURL string `json:"ssh_url"`
- HTTPURL string `json:"http_url"`
- WebURL string `json:"web_url"`
- VisibilityLevel VisibilityLevelValue `json:"visibility_level"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ AvatarURL string `json:"avatar_url"`
+ GitSSHURL string `json:"git_ssh_url"`
+ GitHTTPURL string `json:"git_http_url"`
+ Namespace string `json:"namespace"`
+ PathWithNamespace string `json:"path_with_namespace"`
+ DefaultBranch string `json:"default_branch"`
+ Homepage string `json:"homepage"`
+ URL string `json:"url"`
+ SSHURL string `json:"ssh_url"`
+ HTTPURL string `json:"http_url"`
+ WebURL string `json:"web_url"`
+ Visibility VisibilityValue `json:"visibility"`
} `json:"project"`
Repository *Repository `json:"repository"`
ObjectAttributes struct {
@@ -236,26 +236,26 @@ type MergeCommentEvent struct {
// IssueCommentEvent represents a comment on an issue event.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#comment-on-issue
+// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#comment-on-issue
type IssueCommentEvent struct {
ObjectKind string `json:"object_kind"`
User *User `json:"user"`
ProjectID int `json:"project_id"`
Project struct {
- Name string `json:"name"`
- Description string `json:"description"`
- AvatarURL string `json:"avatar_url"`
- GitSSHURL string `json:"git_ssh_url"`
- GitHTTPURL string `json:"git_http_url"`
- Namespace string `json:"namespace"`
- PathWithNamespace string `json:"path_with_namespace"`
- DefaultBranch string `json:"default_branch"`
- Homepage string `json:"homepage"`
- URL string `json:"url"`
- SSHURL string `json:"ssh_url"`
- HTTPURL string `json:"http_url"`
- WebURL string `json:"web_url"`
- VisibilityLevel VisibilityLevelValue `json:"visibility_level"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ AvatarURL string `json:"avatar_url"`
+ GitSSHURL string `json:"git_ssh_url"`
+ GitHTTPURL string `json:"git_http_url"`
+ Namespace string `json:"namespace"`
+ PathWithNamespace string `json:"path_with_namespace"`
+ DefaultBranch string `json:"default_branch"`
+ Homepage string `json:"homepage"`
+ URL string `json:"url"`
+ SSHURL string `json:"ssh_url"`
+ HTTPURL string `json:"http_url"`
+ WebURL string `json:"web_url"`
+ Visibility VisibilityValue `json:"visibility"`
} `json:"project"`
Repository *Repository `json:"repository"`
ObjectAttributes struct {
@@ -280,26 +280,26 @@ type IssueCommentEvent struct {
// SnippetCommentEvent represents a comment on a snippet event.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#comment-on-code-snippet
+// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#comment-on-code-snippet
type SnippetCommentEvent struct {
ObjectKind string `json:"object_kind"`
User *User `json:"user"`
ProjectID int `json:"project_id"`
Project struct {
- Name string `json:"name"`
- Description string `json:"description"`
- AvatarURL string `json:"avatar_url"`
- GitSSHURL string `json:"git_ssh_url"`
- GitHTTPURL string `json:"git_http_url"`
- Namespace string `json:"namespace"`
- PathWithNamespace string `json:"path_with_namespace"`
- DefaultBranch string `json:"default_branch"`
- Homepage string `json:"homepage"`
- URL string `json:"url"`
- SSHURL string `json:"ssh_url"`
- HTTPURL string `json:"http_url"`
- WebURL string `json:"web_url"`
- VisibilityLevel VisibilityLevelValue `json:"visibility_level"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ AvatarURL string `json:"avatar_url"`
+ GitSSHURL string `json:"git_ssh_url"`
+ GitHTTPURL string `json:"git_http_url"`
+ Namespace string `json:"namespace"`
+ PathWithNamespace string `json:"path_with_namespace"`
+ DefaultBranch string `json:"default_branch"`
+ Homepage string `json:"homepage"`
+ URL string `json:"url"`
+ SSHURL string `json:"ssh_url"`
+ HTTPURL string `json:"http_url"`
+ WebURL string `json:"web_url"`
+ Visibility VisibilityValue `json:"visibility"`
} `json:"project"`
Repository *Repository `json:"repository"`
ObjectAttributes struct {
@@ -324,25 +324,25 @@ type SnippetCommentEvent struct {
// MergeEvent represents a merge event.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#merge-request-events
+// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#merge-request-events
type MergeEvent struct {
ObjectKind string `json:"object_kind"`
User *User `json:"user"`
Project struct {
- Name string `json:"name"`
- Description string `json:"description"`
- AvatarURL string `json:"avatar_url"`
- GitSSHURL string `json:"git_ssh_url"`
- GitHTTPURL string `json:"git_http_url"`
- Namespace string `json:"namespace"`
- PathWithNamespace string `json:"path_with_namespace"`
- DefaultBranch string `json:"default_branch"`
- Homepage string `json:"homepage"`
- URL string `json:"url"`
- SSHURL string `json:"ssh_url"`
- HTTPURL string `json:"http_url"`
- WebURL string `json:"web_url"`
- VisibilityLevel VisibilityLevelValue `json:"visibility_level"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ AvatarURL string `json:"avatar_url"`
+ GitSSHURL string `json:"git_ssh_url"`
+ GitHTTPURL string `json:"git_http_url"`
+ Namespace string `json:"namespace"`
+ PathWithNamespace string `json:"path_with_namespace"`
+ DefaultBranch string `json:"default_branch"`
+ Homepage string `json:"homepage"`
+ URL string `json:"url"`
+ SSHURL string `json:"ssh_url"`
+ HTTPURL string `json:"http_url"`
+ WebURL string `json:"web_url"`
+ Visibility VisibilityValue `json:"visibility"`
} `json:"project"`
ObjectAttributes struct {
ID int `json:"id"`
@@ -407,25 +407,25 @@ type MergeEvent struct {
// WikiPageEvent represents a wiki page event.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#wiki-page-events
+// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#wiki-page-events
type WikiPageEvent struct {
ObjectKind string `json:"object_kind"`
User *User `json:"user"`
Project struct {
- Name string `json:"name"`
- Description string `json:"description"`
- AvatarURL string `json:"avatar_url"`
- GitSSHURL string `json:"git_ssh_url"`
- GitHTTPURL string `json:"git_http_url"`
- Namespace string `json:"namespace"`
- PathWithNamespace string `json:"path_with_namespace"`
- DefaultBranch string `json:"default_branch"`
- Homepage string `json:"homepage"`
- URL string `json:"url"`
- SSHURL string `json:"ssh_url"`
- HTTPURL string `json:"http_url"`
- WebURL string `json:"web_url"`
- VisibilityLevel VisibilityLevelValue `json:"visibility_level"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ AvatarURL string `json:"avatar_url"`
+ GitSSHURL string `json:"git_ssh_url"`
+ GitHTTPURL string `json:"git_http_url"`
+ Namespace string `json:"namespace"`
+ PathWithNamespace string `json:"path_with_namespace"`
+ DefaultBranch string `json:"default_branch"`
+ Homepage string `json:"homepage"`
+ URL string `json:"url"`
+ SSHURL string `json:"ssh_url"`
+ HTTPURL string `json:"http_url"`
+ WebURL string `json:"web_url"`
+ Visibility VisibilityValue `json:"visibility"`
} `json:"project"`
Wiki struct {
WebURL string `json:"web_url"`
@@ -448,7 +448,7 @@ type WikiPageEvent struct {
// PipelineEvent represents a pipeline event.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#pipeline-events
+// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#pipeline-events
type PipelineEvent struct {
ObjectKind string `json:"object_kind"`
ObjectAttributes struct {
@@ -469,20 +469,20 @@ type PipelineEvent struct {
AvatarURL string `json:"avatar_url"`
} `json:"user"`
Project struct {
- Name string `json:"name"`
- Description string `json:"description"`
- AvatarURL string `json:"avatar_url"`
- GitSSHURL string `json:"git_ssh_url"`
- GitHTTPURL string `json:"git_http_url"`
- Namespace string `json:"namespace"`
- PathWithNamespace string `json:"path_with_namespace"`
- DefaultBranch string `json:"default_branch"`
- Homepage string `json:"homepage"`
- URL string `json:"url"`
- SSHURL string `json:"ssh_url"`
- HTTPURL string `json:"http_url"`
- WebURL string `json:"web_url"`
- VisibilityLevel VisibilityLevelValue `json:"visibility_level"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ AvatarURL string `json:"avatar_url"`
+ GitSSHURL string `json:"git_ssh_url"`
+ GitHTTPURL string `json:"git_http_url"`
+ Namespace string `json:"namespace"`
+ PathWithNamespace string `json:"path_with_namespace"`
+ DefaultBranch string `json:"default_branch"`
+ Homepage string `json:"homepage"`
+ URL string `json:"url"`
+ SSHURL string `json:"ssh_url"`
+ HTTPURL string `json:"http_url"`
+ WebURL string `json:"web_url"`
+ Visibility VisibilityValue `json:"visibility"`
} `json:"project"`
Commit struct {
ID string `json:"id"`
@@ -509,10 +509,15 @@ type PipelineEvent struct {
Username string `json:"username"`
AvatarURL string `json:"avatar_url"`
} `json:"user"`
- Runner string `json:"runner"`
+ Runner struct {
+ ID int `json:"id"`
+ Description string `json:"description"`
+ Active bool `json:"active"`
+ IsShared bool `json:"is_shared"`
+ } `json:"runner"`
ArtifactsFile struct {
Filename string `json:"filename"`
- Size string `json:"size"`
+ Size int `json:"size"`
} `json:"artifacts_file"`
} `json:"builds"`
}
@@ -520,23 +525,23 @@ type PipelineEvent struct {
// BuildEvent represents a build event.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#build-events
+// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#build-events
type BuildEvent struct {
- ObjectKind string `json:"object_kind"`
- Ref string `json:"ref"`
- Tag bool `json:"tag"`
- BeforeSha string `json:"before_sha"`
- Sha string `json:"sha"`
- BuildID int `json:"build_id"`
- BuildName string `json:"build_name"`
- BuildStage string `json:"build_stage"`
- BuildStatus string `json:"build_status"`
- BuildStartedAt string `json:"build_started_at"`
- BuildFinishedAt string `json:"build_finished_at"`
- BuildDuration string `json:"build_duration"`
- BuildAllowFailure bool `json:"build_allow_failure"`
- ProjectID int `json:"project_id"`
- ProjectName string `json:"project_name"`
+ ObjectKind string `json:"object_kind"`
+ Ref string `json:"ref"`
+ Tag bool `json:"tag"`
+ BeforeSha string `json:"before_sha"`
+ Sha string `json:"sha"`
+ BuildID int `json:"build_id"`
+ BuildName string `json:"build_name"`
+ BuildStage string `json:"build_stage"`
+ BuildStatus string `json:"build_status"`
+ BuildStartedAt string `json:"build_started_at"`
+ BuildFinishedAt string `json:"build_finished_at"`
+ BuildDuration float64 `json:"build_duration"`
+ BuildAllowFailure bool `json:"build_allow_failure"`
+ ProjectID int `json:"project_id"`
+ ProjectName string `json:"project_name"`
User struct {
ID int `json:"id"`
Name string `json:"name"`
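Since the webhook structs above now carry the string-based Visibility field, here is a sketch of decoding a push-event payload in an HTTP handler. Only the PushEvent type comes from this diff; the handler wiring, port and route are assumptions, and no secret-token validation is shown.

package main

import (
	"encoding/json"
	"log"
	"net/http"

	"github.com/xanzy/go-gitlab"
)

// pushHandler decodes a GitLab push webhook into the PushEvent type shown above.
func pushHandler(w http.ResponseWriter, r *http.Request) {
	var event gitlab.PushEvent
	if err := json.NewDecoder(r.Body).Decode(&event); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	// Visibility is now a string value ("private", "internal" or "public").
	log.Printf("push to %s (visibility %q) with %d commits",
		event.Project.PathWithNamespace, event.Project.Visibility, len(event.Commits))
}

func main() {
	http.HandleFunc("/webhook", pushHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}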
diff --git a/vendor/github.com/xanzy/go-gitlab/feature_flags.go b/vendor/github.com/xanzy/go-gitlab/feature_flags.go
new file mode 100644
index 0000000..b6380ab
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/feature_flags.go
@@ -0,0 +1,79 @@
+package gitlab
+
+import (
+ "fmt"
+ "net/url"
+)
+
+// FeaturesService handles the communication with the feature flag related
+// methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/features.html
+type FeaturesService struct {
+ client *Client
+}
+
+// Feature represents a GitLab feature flag.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/features.html
+type Feature struct {
+ Name string `json:"name"`
+ State string `json:"state"`
+ Gates []Gate
+}
+
+// Gate represents a gate of a GitLab feature flag.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/features.html
+type Gate struct {
+ Key string `json:"key"`
+ Value interface{} `json:"value"`
+}
+
+func (f Feature) String() string {
+ return Stringify(f)
+}
+
+// ListFeatures gets a list of feature flags
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/features.html#list-all-features
+func (s *FeaturesService) ListFeatures(options ...OptionFunc) ([]*Feature, *Response, error) {
+ req, err := s.client.NewRequest("GET", "features", nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var f []*Feature
+ resp, err := s.client.Do(req, &f)
+ if err != nil {
+ return nil, resp, err
+ }
+ return f, resp, err
+}
+
+// SetFeatureFlag sets or creates a feature flag gate
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/features.html#set-or-create-a-feature
+func (s *FeaturesService) SetFeatureFlag(name string, value interface{}, options ...OptionFunc) (*Feature, *Response, error) {
+ u := fmt.Sprintf("features/%s", url.QueryEscape(name))
+
+ opt := struct {
+ Value interface{} `url:"value" json:"value"`
+ }{
+ value,
+ }
+
+ req, err := s.client.NewRequest("POST", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ f := &Feature{}
+ resp, err := s.client.Do(req, f)
+ if err != nil {
+ return nil, resp, err
+ }
+ return f, resp, err
+}
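A small sketch of the new Features service, assuming an admin-scoped token; the flag name and percentage value are placeholders.

package main

import (
	"fmt"
	"log"

	"github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "admin-token") // placeholder token

	// Enable a (placeholder) flag for 25 percent of the time, then list all flags.
	if _, _, err := git.Features.SetFeatureFlag("my_feature_flag", 25); err != nil {
		log.Fatal(err)
	}

	features, _, err := git.Features.ListFeatures()
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range features {
		fmt.Printf("%s: %s\n", f.Name, f.State)
	}
}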
diff --git a/vendor/github.com/xanzy/go-gitlab/gitlab.go b/vendor/github.com/xanzy/go-gitlab/gitlab.go
index c2da4c3..80bd1ed 100644
--- a/vendor/github.com/xanzy/go-gitlab/gitlab.go
+++ b/vendor/github.com/xanzy/go-gitlab/gitlab.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -33,21 +33,19 @@ import (
)
const (
- libraryVersion = "0.1.1"
- defaultBaseURL = "https://gitlab.com/api/v3/"
+ libraryVersion = "0.2.0"
+ defaultBaseURL = "https://gitlab.com/api/v4/"
userAgent = "go-gitlab/" + libraryVersion
)
// tokenType represents a token type within GitLab.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/
+// GitLab API docs: https://docs.gitlab.com/ce/api/
type tokenType int
// List of available token types
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/
+// GitLab API docs: https://docs.gitlab.com/ce/api/
const (
privateToken tokenType = iota
oAuthToken
@@ -55,14 +53,12 @@ const (
// AccessLevelValue represents a permission level within GitLab.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/permissions/permissions.md
+// GitLab API docs: https://docs.gitlab.com/ce/permissions/permissions.html
type AccessLevelValue int
// List of available access levels
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/permissions/permissions.md
+// GitLab API docs: https://docs.gitlab.com/ce/permissions/permissions.html
const (
GuestPermissions AccessLevelValue = 10
ReporterPermissions AccessLevelValue = 20
@@ -131,20 +127,18 @@ var notificationLevelTypes = map[string]NotificationLevelValue{
"custom": CustomNotificationLevel,
}
-// VisibilityLevelValue represents a visibility level within GitLab.
+// VisibilityValue represents a visibility level within GitLab.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/
-type VisibilityLevelValue int
+// GitLab API docs: https://docs.gitlab.com/ce/api/
+type VisibilityValue string
// List of available visibility levels
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/
+// GitLab API docs: https://docs.gitlab.com/ce/api/
const (
- PrivateVisibility VisibilityLevelValue = 0
- InternalVisibility VisibilityLevelValue = 10
- PublicVisibility VisibilityLevelValue = 20
+ PrivateVisibility VisibilityValue = "private"
+ InternalVisibility VisibilityValue = "internal"
+ PublicVisibility VisibilityValue = "public"
)
// A Client manages communication with the GitLab API.
@@ -169,11 +163,12 @@ type Client struct {
// Services used for talking to different parts of the GitLab API.
Branches *BranchesService
BuildVariables *BuildVariablesService
- Builds *BuildsService
Commits *CommitsService
DeployKeys *DeployKeysService
+ Features *FeaturesService
Groups *GroupsService
Issues *IssuesService
+ Jobs *JobsService
Labels *LabelsService
MergeRequests *MergeRequestsService
Milestones *MilestonesService
@@ -181,8 +176,10 @@ type Client struct {
Notes *NotesService
NotificationSettings *NotificationSettingsService
Projects *ProjectsService
+ ProjectMembers *ProjectMembersService
ProjectSnippets *ProjectSnippetsService
Pipelines *PipelinesService
+ PipelineTriggers *PipelineTriggersService
Repositories *RepositoriesService
RepositoryFiles *RepositoryFilesService
Services *ServicesService
@@ -190,8 +187,10 @@ type Client struct {
Settings *SettingsService
SystemHooks *SystemHooksService
Tags *TagsService
- TimeStats *TimeStatsService
+ Todos *TodosService
Users *UsersService
+ Version *VersionService
+ Wikis *WikisService
}
// ListOptions specifies the optional parameters to various List methods that
@@ -225,26 +224,33 @@ func newClient(httpClient *http.Client, tokenType tokenType, token string) *Clie
c := &Client{client: httpClient, tokenType: tokenType, token: token, UserAgent: userAgent}
if err := c.SetBaseURL(defaultBaseURL); err != nil {
- // should never happen since defaultBaseURL is our constant
+ // Should never happen since defaultBaseURL is our constant.
panic(err)
}
+ // Create the internal timeStats service.
+ timeStats := &timeStatsService{client: c}
+
+ // Create all the public services.
c.Branches = &BranchesService{client: c}
c.BuildVariables = &BuildVariablesService{client: c}
- c.Builds = &BuildsService{client: c}
c.Commits = &CommitsService{client: c}
c.DeployKeys = &DeployKeysService{client: c}
+ c.Features = &FeaturesService{client: c}
c.Groups = &GroupsService{client: c}
- c.Issues = &IssuesService{client: c}
+ c.Issues = &IssuesService{client: c, timeStats: timeStats}
+ c.Jobs = &JobsService{client: c}
c.Labels = &LabelsService{client: c}
- c.MergeRequests = &MergeRequestsService{client: c}
+ c.MergeRequests = &MergeRequestsService{client: c, timeStats: timeStats}
c.Milestones = &MilestonesService{client: c}
c.Namespaces = &NamespacesService{client: c}
c.Notes = &NotesService{client: c}
c.NotificationSettings = &NotificationSettingsService{client: c}
c.Projects = &ProjectsService{client: c}
+ c.ProjectMembers = &ProjectMembersService{client: c}
c.ProjectSnippets = &ProjectSnippetsService{client: c}
c.Pipelines = &PipelinesService{client: c}
+ c.PipelineTriggers = &PipelineTriggersService{client: c}
c.Repositories = &RepositoriesService{client: c}
c.RepositoryFiles = &RepositoryFilesService{client: c}
c.Services = &ServicesService{client: c}
@@ -252,8 +258,10 @@ func newClient(httpClient *http.Client, tokenType tokenType, token string) *Clie
c.Settings = &SettingsService{client: c}
c.SystemHooks = &SystemHooksService{client: c}
c.Tags = &TagsService{client: c}
- c.TimeStats = &TimeStatsService{client: c}
+ c.Todos = &TodosService{client: c}
c.Users = &UsersService{client: c}
+ c.Version = &VersionService{client: c}
+ c.Wikis = &WikisService{client: c}
return c
}
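With the default base URL moved to the v4 API and the service list reshuffled above, a sketch of constructing a client against a self-hosted instance and using one of the extended ListGroups filters; NewClient, the token and the instance URL are placeholders or assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/xanzy/go-gitlab"
)

func main() {
	// NewClient is assumed from the same package; token and URL are placeholders.
	git := gitlab.NewClient(nil, "private-token")
	if err := git.SetBaseURL("https://gitlab.example.com/api/v4/"); err != nil {
		log.Fatal(err)
	}

	// Use the new Owned filter added to ListGroupsOptions further below.
	owned := true
	groups, _, err := git.Groups.ListGroups(&gitlab.ListGroupsOptions{Owned: &owned})
	if err != nil {
		log.Fatal(err)
	}
	for _, g := range groups {
		fmt.Println(g.FullPath)
	}
}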
@@ -347,7 +355,7 @@ type Response struct {
*http.Response
// These fields provide the page values for paginating through a set of
- // results. Any or all of these may be set to the zero value for
+ // results. Any or all of these may be set to the zero value for
// responses that are not part of a paginated set, or for which there
// are no additional pages.
@@ -357,7 +365,7 @@ type Response struct {
LastPage int
}
-// newResponse creats a new Response for the provided http.Response.
+// newResponse creates a new Response for the provided http.Response.
func newResponse(r *http.Response) *Response {
response := &Response{Response: r}
response.populatePageValues()
@@ -365,7 +373,7 @@ func newResponse(r *http.Response) *Response {
}
// populatePageValues parses the HTTP Link response headers and populates the
-// various pagination link values in the Reponse.
+// various pagination link values in the Response.
func (r *Response) populatePageValues() {
if links, ok := r.Response.Header["Link"]; ok && len(links) > 0 {
for _, link := range strings.Split(links[0], ",") {
@@ -436,6 +444,7 @@ func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
err = json.NewDecoder(resp.Body).Decode(v)
}
}
+
return response, err
}
@@ -455,7 +464,7 @@ func parseID(id interface{}) (string, error) {
// An ErrorResponse reports one or more errors caused by an API request.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/README.md#data-validation-and-error-reporting
+// https://docs.gitlab.com/ce/api/README.html#data-validation-and-error-reporting
type ErrorResponse struct {
Response *http.Response
Message string
@@ -470,7 +479,7 @@ func (e *ErrorResponse) Error() string {
// CheckResponse checks the API response for errors, and returns them if present.
func CheckResponse(r *http.Response) error {
switch r.StatusCode {
- case 200, 201, 304:
+ case 200, 201, 202, 204, 304:
return nil
}
@@ -534,7 +543,7 @@ func parseError(raw interface{}) string {
// OptionFunc can be passed to all API requests to make the API call as if you were
// another user, provided your private token is from an administrator account.
//
-// GitLab docs: https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/README.md#sudo
+// GitLab docs: https://docs.gitlab.com/ce/api/README.html#sudo
type OptionFunc func(*http.Request) error
// WithSudo takes either a username or user ID and sets the SUDO request header
@@ -602,10 +611,10 @@ func NotificationLevel(v NotificationLevelValue) *NotificationLevelValue {
return p
}
-// VisibilityLevel is a helper routine that allocates a new VisibilityLevelValue
+// Visibility is a helper routine that allocates a new VisibilityValue
// to store v and returns a pointer to it.
-func VisibilityLevel(v VisibilityLevelValue) *VisibilityLevelValue {
- p := new(VisibilityLevelValue)
+func Visibility(v VisibilityValue) *VisibilityValue {
+ p := new(VisibilityValue)
*p = v
return p
}
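The Visibility helper above replaces VisibilityLevel, and the constants are now strings instead of integers. A sketch of passing the new values through CreateGroupOptions (shown later in this diff); the group name, path and client setup are placeholders.

package main

import (
	"log"

	"github.com/xanzy/go-gitlab"
)

func createInternalGroup(git *gitlab.Client) error {
	name, path := "infra", "infra" // placeholder group name and path
	_, _, err := git.Groups.CreateGroup(&gitlab.CreateGroupOptions{
		Name:       &name,
		Path:       &path,
		Visibility: gitlab.Visibility(gitlab.InternalVisibility),
	})
	return err
}

func main() {
	git := gitlab.NewClient(nil, "private-token") // placeholder token
	if err := createInternalGroup(git); err != nil {
		log.Fatal(err)
	}
}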
diff --git a/vendor/github.com/xanzy/go-gitlab/groups.go b/vendor/github.com/xanzy/go-gitlab/groups.go
index 6a42dde..3629a9b 100644
--- a/vendor/github.com/xanzy/go-gitlab/groups.go
+++ b/vendor/github.com/xanzy/go-gitlab/groups.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -18,45 +18,55 @@ package gitlab
import (
"fmt"
+ "net/url"
"time"
)
// GroupsService handles communication with the group related methods of
// the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html
type GroupsService struct {
client *Client
}
// Group represents a GitLab group.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html
type Group struct {
- ID int `json:"id"`
- Name string `json:"name"`
- Path string `json:"path"`
- Description string `json:"description"`
- Projects []*Project `json:"projects"`
- Statistics *StorageStatistics `json:"statistics"`
+ ID int `json:"id"`
+ Name string `json:"name"`
+ Path string `json:"path"`
+ Description string `json:"description"`
+ Visibility *VisibilityValue `json:"visibility"`
+ LFSEnabled bool `json:"lfs_enabled"`
+ AvatarURL string `json:"avatar_url"`
+ WebURL string `json:"web_url"`
+ RequestAccessEnabled bool `json:"request_access_enabled"`
+ FullName string `json:"full_name"`
+ FullPath string `json:"full_path"`
+ ParentID int `json:"parent_id"`
+ Projects []*Project `json:"projects"`
+ Statistics *StorageStatistics `json:"statistics"`
}
// ListGroupsOptions represents the available ListGroups() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-project-groups
+// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#list-project-groups
type ListGroupsOptions struct {
ListOptions
- Search *string `url:"search,omitempty" json:"search,omitempty"`
- Statistics *bool `url:"statistics,omitempty" json:"statistics,omitempty"`
+ AllAvailable *bool `url:"all_available,omitempty" json:"all_available,omitempty"`
+ OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+ Owned *bool `url:"owned,omitempty" json:"owned,omitempty"`
+ Search *string `url:"search,omitempty" json:"search,omitempty"`
+ Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
+ Statistics *bool `url:"statistics,omitempty" json:"statistics,omitempty"`
}
-// ListGroups gets a list of groups. (As user: my groups, as admin: all groups)
+// ListGroups gets a list of groups (as user: my groups, as admin: all groups).
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-project-groups
+// https://docs.gitlab.com/ce/api/groups.html#list-project-groups
func (s *GroupsService) ListGroups(opt *ListGroupsOptions, options ...OptionFunc) ([]*Group, *Response, error) {
req, err := s.client.NewRequest("GET", "groups", opt, options)
if err != nil {
@@ -74,14 +84,13 @@ func (s *GroupsService) ListGroups(opt *ListGroupsOptions, options ...OptionFunc
// GetGroup gets all details of a group.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#details-of-a-group
+// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#details-of-a-group
func (s *GroupsService) GetGroup(gid interface{}, options ...OptionFunc) (*Group, *Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("groups/%s", group)
+ u := fmt.Sprintf("groups/%s", url.QueryEscape(group))
req, err := s.client.NewRequest("GET", u, nil, options)
if err != nil {
@@ -99,20 +108,21 @@ func (s *GroupsService) GetGroup(gid interface{}, options ...OptionFunc) (*Group
// CreateGroupOptions represents the available CreateGroup() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#new-group
+// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#new-group
type CreateGroupOptions struct {
- Name *string `url:"name,omitempty" json:"name,omitempty"`
- Path *string `url:"path,omitempty" json:"path,omitempty"`
- Description *string `url:"description,omitempty" json:"description,omitempty"`
- VisibilityLevel *VisibilityLevelValue `url:"visibility_level" json:"visibility_level,omitempty"`
+ Name *string `url:"name,omitempty" json:"name,omitempty"`
+ Path *string `url:"path,omitempty" json:"path,omitempty"`
+ Description *string `url:"description,omitempty" json:"description,omitempty"`
+ Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"`
+ LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"`
+ RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"`
+ ParentID *int `url:"parent_id,omitempty" json:"parent_id,omitempty"`
}
// CreateGroup creates a new project group. Available only for users who can
// create groups.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#new-group
+// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#new-group
func (s *GroupsService) CreateGroup(opt *CreateGroupOptions, options ...OptionFunc) (*Group, *Response, error) {
req, err := s.client.NewRequest("POST", "groups", opt, options)
if err != nil {
@@ -132,13 +142,13 @@ func (s *GroupsService) CreateGroup(opt *CreateGroupOptions, options ...OptionFu
// for admin.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#transfer-project-to-group
+// https://docs.gitlab.com/ce/api/groups.html#transfer-project-to-group
func (s *GroupsService) TransferGroup(gid interface{}, project int, options ...OptionFunc) (*Group, *Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("groups/%s/projects/%d", group, project)
+ u := fmt.Sprintf("groups/%s/projects/%d", url.QueryEscape(group), project)
req, err := s.client.NewRequest("POST", u, nil, options)
if err != nil {
@@ -154,16 +164,46 @@ func (s *GroupsService) TransferGroup(gid interface{}, project int, options ...O
return g, resp, err
}
+// UpdateGroupOptions represents the set of available options to update a Group;
+// as of today these are exactly the same as those available when creating a new Group.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#update-group
+type UpdateGroupOptions CreateGroupOptions
+
+// UpdateGroup updates an existing group; only available to group owners and
+// administrators.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#update-group
+func (s *GroupsService) UpdateGroup(gid interface{}, opt *UpdateGroupOptions, options ...OptionFunc) (*Group, *Response, error) {
+ group, err := parseID(gid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("groups/%s", url.QueryEscape(group))
+
+ req, err := s.client.NewRequest("PUT", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ g := new(Group)
+ resp, err := s.client.Do(req, g)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return g, resp, err
+}
+
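A sketch of the new UpdateGroup call, reusing the option names above; the group path and values are placeholders.

package main

import (
	"log"

	"github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "private-token") // placeholder token

	// Flip an existing (placeholder) group to public and enable LFS.
	lfs := true
	_, _, err := git.Groups.UpdateGroup("infra", &gitlab.UpdateGroupOptions{
		Visibility: gitlab.Visibility(gitlab.PublicVisibility),
		LFSEnabled: &lfs,
	})
	if err != nil {
		log.Fatal(err)
	}
}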
// DeleteGroup removes a group with all of its projects.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#remove-group
+// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#remove-group
func (s *GroupsService) DeleteGroup(gid interface{}, options ...OptionFunc) (*Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, err
}
- u := fmt.Sprintf("groups/%s", group)
+ u := fmt.Sprintf("groups/%s", url.QueryEscape(group))
req, err := s.client.NewRequest("DELETE", u, nil, options)
if err != nil {
@@ -175,8 +215,7 @@ func (s *GroupsService) DeleteGroup(gid interface{}, options ...OptionFunc) (*Re
// SearchGroup gets all groups that match your string in their name or path.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#search-for-group
+// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#search-for-group
func (s *GroupsService) SearchGroup(query string, options ...OptionFunc) ([]*Group, *Response, error) {
var q struct {
Search string `url:"search,omitempty" json:"search,omitempty"`
@@ -199,8 +238,7 @@ func (s *GroupsService) SearchGroup(query string, options ...OptionFunc) ([]*Gro
// GroupMember represents a GitLab group member.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/members.html
type GroupMember struct {
ID int `json:"id"`
Username string `json:"username"`
@@ -215,7 +253,7 @@ type GroupMember struct {
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-group-members
+// https://docs.gitlab.com/ce/api/members.html#list-all-members-of-a-group-or-project
type ListGroupMembersOptions struct {
ListOptions
}
@@ -224,13 +262,13 @@ type ListGroupMembersOptions struct {
// user.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-group-members
+// https://docs.gitlab.com/ce/api/members.html#list-all-members-of-a-group-or-project
func (s *GroupsService) ListGroupMembers(gid interface{}, opt *ListGroupMembersOptions, options ...OptionFunc) ([]*GroupMember, *Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("groups/%s/members", group)
+ u := fmt.Sprintf("groups/%s/members", url.QueryEscape(group))
req, err := s.client.NewRequest("GET", u, opt, options)
if err != nil {
@@ -250,7 +288,7 @@ func (s *GroupsService) ListGroupMembers(gid interface{}, opt *ListGroupMembersO
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-a-group-s-projects
+// https://docs.gitlab.com/ce/api/groups.html#list-a-group-s-projects
type ListGroupProjectsOptions struct {
ListOptions
}
@@ -258,13 +296,13 @@ type ListGroupProjectsOptions struct {
// ListGroupProjects gets a list of group projects
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-a-group-s-projects
+// https://docs.gitlab.com/ce/api/groups.html#list-a-group-s-projects
func (s *GroupsService) ListGroupProjects(gid interface{}, opt *ListGroupProjectsOptions, options ...OptionFunc) ([]*Project, *Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("groups/%s/projects", group)
+ u := fmt.Sprintf("groups/%s/projects", url.QueryEscape(group))
req, err := s.client.NewRequest("GET", u, opt, options)
if err != nil {
@@ -283,22 +321,23 @@ func (s *GroupsService) ListGroupProjects(gid interface{}, opt *ListGroupProject
// AddGroupMemberOptions represents the available AddGroupMember() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#add-group-member
+// https://docs.gitlab.com/ce/api/members.html#add-a-member-to-a-group-or-project
type AddGroupMemberOptions struct {
UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"`
AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+ ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at"`
}
// AddGroupMember adds a user to the list of group members.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-group-members
+// https://docs.gitlab.com/ce/api/members.html#add-a-member-to-a-group-or-project
func (s *GroupsService) AddGroupMember(gid interface{}, opt *AddGroupMemberOptions, options ...OptionFunc) (*GroupMember, *Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("groups/%s/members", group)
+ u := fmt.Sprintf("groups/%s/members", url.QueryEscape(group))
req, err := s.client.NewRequest("POST", u, opt, options)
if err != nil {
@@ -314,25 +353,26 @@ func (s *GroupsService) AddGroupMember(gid interface{}, opt *AddGroupMemberOptio
return g, resp, err
}
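Because AddGroupMemberOptions now carries an expiry, a sketch of adding a time-limited member; the user ID, group path and date are placeholders, and the date format follows the GitLab members API.

package main

import (
	"log"

	"github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "private-token") // placeholder token

	userID := 123                       // placeholder user ID
	level := gitlab.ReporterPermissions // access level constant from this package
	expires := "2017-12-31"             // placeholder expiry date (YYYY-MM-DD)
	_, _, err := git.Groups.AddGroupMember("infra", &gitlab.AddGroupMemberOptions{
		UserID:      &userID,
		AccessLevel: &level,
		ExpiresAt:   &expires,
	})
	if err != nil {
		log.Fatal(err)
	}
}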
-// UpdateGroupMemberOptions represents the available UpdateGroupMember()
+// EditGroupMemberOptions represents the available EditGroupMember()
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#edit-group-team-member
-type UpdateGroupMemberOptions struct {
+// https://docs.gitlab.com/ce/api/members.html#edit-a-member-of-a-group-or-project
+type EditGroupMemberOptions struct {
AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+ ExpiresAt *string `url:"expires_at,omitempty" json:"expires_at"`
}
-// UpdateGroupMember updates a group team member to a specified access level.
+// EditGroupMember updates a member of a group.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-group-members
-func (s *GroupsService) UpdateGroupMember(gid interface{}, user int, opt *UpdateGroupMemberOptions, options ...OptionFunc) (*GroupMember, *Response, error) {
+// https://docs.gitlab.com/ce/api/members.html#edit-a-member-of-a-group-or-project
+func (s *GroupsService) EditGroupMember(gid interface{}, user int, opt *EditGroupMemberOptions, options ...OptionFunc) (*GroupMember, *Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("groups/%s/members/%d", group, user)
+ u := fmt.Sprintf("groups/%s/members/%d", url.QueryEscape(group), user)
req, err := s.client.NewRequest("PUT", u, opt, options)
if err != nil {
@@ -351,13 +391,13 @@ func (s *GroupsService) UpdateGroupMember(gid interface{}, user int, opt *Update
// RemoveGroupMember removes a user from the group.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#remove-user-from-user-team
+// https://docs.gitlab.com/ce/api/members.html#remove-a-member-from-a-group-or-project
func (s *GroupsService) RemoveGroupMember(gid interface{}, user int, options ...OptionFunc) (*Response, error) {
group, err := parseID(gid)
if err != nil {
return nil, err
}
- u := fmt.Sprintf("groups/%s/members/%d", group, user)
+ u := fmt.Sprintf("groups/%s/members/%d", url.QueryEscape(group), user)
req, err := s.client.NewRequest("DELETE", u, nil, options)
if err != nil {
diff --git a/vendor/github.com/xanzy/go-gitlab/issues.go b/vendor/github.com/xanzy/go-gitlab/issues.go
index 62b2d8f..6ab1df2 100644
--- a/vendor/github.com/xanzy/go-gitlab/issues.go
+++ b/vendor/github.com/xanzy/go-gitlab/issues.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -27,16 +27,15 @@ import (
// IssuesService handles communication with the issue related methods
// of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html
type IssuesService struct {
- client *Client
+ client *Client
+ timeStats *timeStatsService
}
// Issue represents a GitLab issue.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html
type Issue struct {
ID int `json:"id"`
IID int `json:"iid"`
@@ -85,21 +84,26 @@ func (l *Labels) MarshalJSON() ([]byte, error) {
// ListIssuesOptions represents the available ListIssues() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#list-issues
+// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#list-issues
type ListIssuesOptions struct {
ListOptions
- State *string `url:"state,omitempty" json:"state,omitempty"`
- Labels Labels `url:"labels,comma,omitempty" json:"labels,omitempty"`
- OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
- Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
+ State *string `url:"state,omitempty" json:"state,omitempty"`
+ Labels Labels `url:"labels,comma,omitempty" json:"labels,omitempty"`
+ Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"`
+ Scope *string `url:"scope,omitempty" json:"scope,omitempty"`
+ AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"`
+ AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
+ MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"`
+ IIDs []int `url:"iids[],omitempty" json:"iids,omitempty"`
+ OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+ Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
+ Search *string `url:"search,omitempty" json:"search,omitempty"`
}
// ListIssues gets all issues created by the authenticated user. This function
// takes pagination parameters page and per_page to restrict the list of issues.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#list-issues
+// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#list-issues
func (s *IssuesService) ListIssues(opt *ListIssuesOptions, options ...OptionFunc) ([]*Issue, *Response, error) {
req, err := s.client.NewRequest("GET", "issues", opt, options)
if err != nil {
@@ -115,25 +119,73 @@ func (s *IssuesService) ListIssues(opt *ListIssuesOptions, options ...OptionFunc
return i, resp, err
}
+// ListGroupIssuesOptions represents the available ListGroupIssues() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#list-group-issues
+type ListGroupIssuesOptions struct {
+ ListOptions
+ State *string `url:"state,omitempty" json:"state,omitempty"`
+ Labels Labels `url:"labels,comma,omitempty" json:"labels,omitempty"`
+ IIDs []int `url:"iids[],omitempty" json:"iids,omitempty"`
+ Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"`
+ Scope *string `url:"scope,omitempty" json:"scope,omitempty"`
+ AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"`
+ AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
+ MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"`
+ OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+ Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
+ Search *string `url:"search,omitempty" json:"search,omitempty"`
+}
+
+// ListGroupIssues gets a list of group issues. This function accepts
+// pagination parameters page and per_page to return the list of group issues.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#list-group-issues
+func (s *IssuesService) ListGroupIssues(pid interface{}, opt *ListGroupIssuesOptions, options ...OptionFunc) ([]*Issue, *Response, error) {
+ group, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+	u := fmt.Sprintf("groups/%s/issues", url.QueryEscape(group))
+
+ req, err := s.client.NewRequest("GET", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var i []*Issue
+ resp, err := s.client.Do(req, &i)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return i, resp, err
+}
+
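A sketch of the new group-level issue listing, filtering on state; the group path is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "private-token") // placeholder token

	state := "opened"
	issues, _, err := git.Issues.ListGroupIssues("infra", &gitlab.ListGroupIssuesOptions{
		State: &state,
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, issue := range issues {
		fmt.Println(issue.IID)
	}
}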
// ListProjectIssuesOptions represents the available ListProjectIssues() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#list-issues
+// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#list-project-issues
type ListProjectIssuesOptions struct {
ListOptions
- IID *int `url:"iid,omitempty" json:"iid,omitempty"`
- State *string `url:"state,omitempty" json:"state,omitempty"`
- Labels Labels `url:"labels,comma,omitempty" json:"labels,omitempty"`
- Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"`
- OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
- Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
+ IIDs []int `url:"iids[],omitempty" json:"iids,omitempty"`
+ State *string `url:"state,omitempty" json:"state,omitempty"`
+ Labels Labels `url:"labels,comma,omitempty" json:"labels,omitempty"`
+ Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"`
+ Scope *string `url:"scope,omitempty" json:"scope,omitempty"`
+ AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"`
+ AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
+ MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"`
+ OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+ Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
+ Search *string `url:"search,omitempty" json:"search,omitempty"`
+ CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"`
+ CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"`
}
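The project-level options just above gained created_after and created_before filters; a sketch that lists issues opened in the last week (the project path is a placeholder).

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "private-token") // placeholder token

	after := time.Now().AddDate(0, 0, -7)
	issues, _, err := git.Issues.ListProjectIssues("group/project", &gitlab.ListProjectIssuesOptions{
		CreatedAfter: &after,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(issues), "issues created in the last week")
}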
// ListProjectIssues gets a list of project issues. This function accepts
// pagination parameters page and per_page to return the list of project issues.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#list-project-issues
+// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#list-project-issues
func (s *IssuesService) ListProjectIssues(pid interface{}, opt *ListProjectIssuesOptions, options ...OptionFunc) ([]*Issue, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -157,8 +209,7 @@ func (s *IssuesService) ListProjectIssues(pid interface{}, opt *ListProjectIssue
// GetIssue gets a single project issue.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#single-issues
+// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#single-issues
func (s *IssuesService) GetIssue(pid interface{}, issue int, options ...OptionFunc) (*Issue, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -182,8 +233,7 @@ func (s *IssuesService) GetIssue(pid interface{}, issue int, options ...OptionFu
// CreateIssueOptions represents the available CreateIssue() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#new-issues
+// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#new-issues
type CreateIssueOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Description *string `url:"description,omitempty" json:"description,omitempty"`
@@ -194,8 +244,7 @@ type CreateIssueOptions struct {
// CreateIssue creates a new project issue.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#new-issues
+// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#new-issues
func (s *IssuesService) CreateIssue(pid interface{}, opt *CreateIssueOptions, options ...OptionFunc) (*Issue, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -219,8 +268,7 @@ func (s *IssuesService) CreateIssue(pid interface{}, opt *CreateIssueOptions, op
// UpdateIssueOptions represents the available UpdateIssue() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#edit-issues
+// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#edit-issues
type UpdateIssueOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Description *string `url:"description,omitempty" json:"description,omitempty"`
@@ -233,8 +281,7 @@ type UpdateIssueOptions struct {
// UpdateIssue updates an existing project issue. This function is also used
// to mark an issue as closed.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#edit-issues
+// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#edit-issues
func (s *IssuesService) UpdateIssue(pid interface{}, issue int, opt *UpdateIssueOptions, options ...OptionFunc) (*Issue, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -258,8 +305,7 @@ func (s *IssuesService) UpdateIssue(pid interface{}, issue int, opt *UpdateIssue
// DeleteIssue deletes a single project issue.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#delete-an-issue
+// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#delete-an-issue
func (s *IssuesService) DeleteIssue(pid interface{}, issue int, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -274,3 +320,43 @@ func (s *IssuesService) DeleteIssue(pid interface{}, issue int, options ...Optio
return s.client.Do(req, nil)
}
+
+// SetTimeEstimate sets the time estimate for a single project issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/issues.html#set-a-time-estimate-for-an-issue
+func (s *IssuesService) SetTimeEstimate(pid interface{}, issue int, opt *SetTimeEstimateOptions, options ...OptionFunc) (*TimeStats, *Response, error) {
+ return s.timeStats.setTimeEstimate(pid, "issues", issue, opt, options...)
+}
+
+// ResetTimeEstimate resets the time estimate for a single project issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/issues.html#reset-the-time-estimate-for-an-issue
+func (s *IssuesService) ResetTimeEstimate(pid interface{}, issue int, options ...OptionFunc) (*TimeStats, *Response, error) {
+ return s.timeStats.resetTimeEstimate(pid, "issues", issue, options...)
+}
+
+// AddSpentTime adds spent time for a single project issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/issues.html#add-spent-time-for-an-issue
+func (s *IssuesService) AddSpentTime(pid interface{}, issue int, opt *AddSpentTimeOptions, options ...OptionFunc) (*TimeStats, *Response, error) {
+ return s.timeStats.addSpentTime(pid, "issues", issue, opt, options...)
+}
+
+// ResetSpentTime resets the spent time for a single project issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/issues.html#reset-spent-time-for-an-issue
+func (s *IssuesService) ResetSpentTime(pid interface{}, issue int, options ...OptionFunc) (*TimeStats, *Response, error) {
+ return s.timeStats.resetSpentTime(pid, "issues", issue, options...)
+}
+
+// GetTimeSpent gets the spent time for a single project issue.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/issues.html#get-time-tracking-stats
+func (s *IssuesService) GetTimeSpent(pid interface{}, issue int, options ...OptionFunc) (*TimeStats, *Response, error) {
+ return s.timeStats.getTimeSpent(pid, "issues", issue, options...)
+}
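
For reference, a minimal usage sketch of the new time-tracking helpers on IssuesService. This example is illustrative and not part of the diff: the gitlab.NewClient constructor, the Issues field on the client, the gitlab.String helper, and the Duration field on SetTimeEstimateOptions are assumptions taken from the rest of the go-gitlab package (time_stats.go and gitlab.go), which this hunk only delegates to.

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// Assumed client constructor from the go-gitlab package (not shown in this diff).
	git := gitlab.NewClient(nil, "your-private-token")

	// Estimate one hour of work on issue 42 of project "group/project".
	// SetTimeEstimateOptions and its Duration field are assumed to be defined
	// in time_stats.go, to which SetTimeEstimate delegates.
	stats, _, err := git.Issues.SetTimeEstimate("group/project", 42, &gitlab.SetTimeEstimateOptions{
		Duration: gitlab.String("1h"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("time stats after estimate: %+v", stats)
}
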
diff --git a/vendor/github.com/xanzy/go-gitlab/jobs.go b/vendor/github.com/xanzy/go-gitlab/jobs.go
new file mode 100644
index 0000000..e8b4f99
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/jobs.go
@@ -0,0 +1,349 @@
+//
+// Copyright 2017, Arkbriar
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "net/url"
+ "time"
+)
+
+// ListJobsOptions are options for ListProjectJobs() and ListPipelineJobs().
+type ListJobsOptions struct {
+ ListOptions
+ Scope []BuildState `url:"scope,omitempty" json:"scope,omitempty"`
+}
+
+// JobsService handles communication with the CI build related methods
+// of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/jobs.html
+type JobsService struct {
+ client *Client
+}
+
+// Job represents a CI build.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/jobs.html
+type Job struct {
+ Commit *Commit `json:"commit"`
+ CreatedAt *time.Time `json:"created_at"`
+ ArtifactsFile struct {
+ Filename string `json:"filename"`
+ Size int `json:"size"`
+ } `json:"artifacts_file"`
+ FinishedAt *time.Time `json:"finished_at"`
+ ID int `json:"id"`
+ Name string `json:"name"`
+ Ref string `json:"ref"`
+ Runner struct {
+ ID int `json:"id"`
+ Description string `json:"description"`
+ Active bool `json:"active"`
+ IsShared bool `json:"is_shared"`
+ Name string `json:"name"`
+ } `json:"runner"`
+ Stage string `json:"stage"`
+ StartedAt *time.Time `json:"started_at"`
+ Status string `json:"status"`
+ Tag bool `json:"tag"`
+ User *User `json:"user"`
+}
+
+// ListProjectJobs gets a list of jobs in a project.
+//
+// The scope of jobs to show can be one of, or an array of: created, pending,
+// running, failed, success, canceled, skipped. All jobs are shown if no scope
+// is provided.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/jobs.html#list-project-jobs
+func (s *JobsService) ListProjectJobs(pid interface{}, opts *ListJobsOptions, options ...OptionFunc) ([]Job, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/jobs", url.QueryEscape(project))
+
+ req, err := s.client.NewRequest("GET", u, opts, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var jobs []Job
+ resp, err := s.client.Do(req, &jobs)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return jobs, resp, err
+}
+
+// ListPipelineJobs gets a list of jobs for a specific pipeline in a
+// project. If the pipeline ID is not found, it will respond with 404.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/jobs.html#list-pipeline-jobs
+func (s *JobsService) ListPipelineJobs(pid interface{}, pipelineID int, opts *ListJobsOptions, options ...OptionFunc) ([]Job, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/pipelines/%d/jobs", project, pipelineID)
+
+ req, err := s.client.NewRequest("GET", u, opts, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var jobs []Job
+ resp, err := s.client.Do(req, &jobs)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return jobs, resp, err
+}
+
+// GetJob gets a single job of a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/jobs.html#get-a-single-job
+func (s *JobsService) GetJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/jobs/%d", project, jobID)
+
+ req, err := s.client.NewRequest("GET", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ job := new(Job)
+ resp, err := s.client.Do(req, job)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return job, resp, err
+}
+
+// GetJobArtifacts gets the job artifacts of a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/jobs.html#get-job-artifacts
+func (s *JobsService) GetJobArtifacts(pid interface{}, jobID int, options ...OptionFunc) (io.Reader, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/jobs/%d/artifacts", project, jobID)
+
+ req, err := s.client.NewRequest("GET", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ artifactsBuf := new(bytes.Buffer)
+ resp, err := s.client.Do(req, artifactsBuf)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return artifactsBuf, resp, err
+}
+
+// DownloadArtifactsFile downloads the artifacts file from the given
+// reference name and job, provided the job finished successfully.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/jobs.html#download-the-artifacts-file
+func (s *JobsService) DownloadArtifactsFile(pid interface{}, refName string, job string, options ...OptionFunc) (io.Reader, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/jobs/artifacts/%s/download?job=%s", project, refName, job)
+
+ req, err := s.client.NewRequest("GET", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ artifactsBuf := new(bytes.Buffer)
+ resp, err := s.client.Do(req, artifactsBuf)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return artifactsBuf, resp, err
+}
+
+// GetTraceFile gets the trace of a specific job of a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/jobs.html#get-a-trace-file
+func (s *JobsService) GetTraceFile(pid interface{}, jobID int, options ...OptionFunc) (io.Reader, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/jobs/%d/trace", project, jobID)
+
+ req, err := s.client.NewRequest("GET", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ traceBuf := new(bytes.Buffer)
+ resp, err := s.client.Do(req, traceBuf)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return traceBuf, resp, err
+}
+
+// CancelJob cancels a single job of a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/jobs.html#cancel-a-job
+func (s *JobsService) CancelJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/jobs/%d/cancel", project, jobID)
+
+ req, err := s.client.NewRequest("POST", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ job := new(Job)
+ resp, err := s.client.Do(req, job)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return job, resp, err
+}
+
+// RetryJob retries a single job of a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/jobs.html#retry-a-job
+func (s *JobsService) RetryJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/jobs/%d/retry", project, jobID)
+
+ req, err := s.client.NewRequest("POST", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ job := new(Job)
+ resp, err := s.client.Do(req, job)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return job, resp, err
+}
+
+// EraseJob erases a single job of a project, removing its job
+// artifacts and job trace.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/jobs.html#erase-a-job
+func (s *JobsService) EraseJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/jobs/%d/erase", project, jobID)
+
+ req, err := s.client.NewRequest("POST", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ job := new(Job)
+ resp, err := s.client.Do(req, job)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return job, resp, err
+}
+
+// KeepArtifacts prevents artifacts from being deleted when
+// expiration is set.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/jobs.html#keep-artifacts
+func (s *JobsService) KeepArtifacts(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/jobs/%d/artifacts/keep", project, jobID)
+
+ req, err := s.client.NewRequest("POST", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ job := new(Job)
+ resp, err := s.client.Do(req, job)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return job, resp, err
+}
+
+// PlayJob triggers a manual action to start a job.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/jobs.html#play-a-job
+func (s *JobsService) PlayJob(pid interface{}, jobID int, options ...OptionFunc) (*Job, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/jobs/%d/play", project, jobID)
+
+ req, err := s.client.NewRequest("POST", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ job := new(Job)
+ resp, err := s.client.Do(req, job)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return job, resp, err
+}
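
A minimal sketch of how the new JobsService might be used. It is not part of the diff and rests on a few assumptions drawn from the rest of the package: the gitlab.NewClient constructor, the Jobs field on the client, and that BuildState (declared elsewhere in the package) is a string-based type that can be converted from a literal.

package main

import (
	"io"
	"log"
	"os"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// Assumed client constructor from the go-gitlab package (not part of this diff).
	git := gitlab.NewClient(nil, "your-private-token")

	// List only failed jobs of project "group/project"; the scope values come
	// from the ListProjectJobs documentation above. BuildState is assumed to be
	// convertible from a string literal.
	opts := &gitlab.ListJobsOptions{Scope: []gitlab.BuildState{gitlab.BuildState("failed")}}
	jobs, _, err := git.Jobs.ListProjectJobs("group/project", opts)
	if err != nil {
		log.Fatal(err)
	}

	for _, job := range jobs {
		log.Printf("job %d (%s) in stage %s: %s", job.ID, job.Name, job.Stage, job.Status)
	}

	// Stream the trace of the first job to stdout, if any.
	if len(jobs) > 0 {
		trace, _, err := git.Jobs.GetTraceFile("group/project", jobs[0].ID)
		if err != nil {
			log.Fatal(err)
		}
		io.Copy(os.Stdout, trace)
	}
}
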
diff --git a/vendor/github.com/xanzy/go-gitlab/labels.go b/vendor/github.com/xanzy/go-gitlab/labels.go
index 3c936ff..10ede6a 100644
--- a/vendor/github.com/xanzy/go-gitlab/labels.go
+++ b/vendor/github.com/xanzy/go-gitlab/labels.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -24,16 +24,14 @@ import (
// LabelsService handles communication with the label related methods
// of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html
type LabelsService struct {
client *Client
}
// Label represents a GitLab label.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html
type Label struct {
Name string `json:"name"`
Color string `json:"color"`
@@ -49,8 +47,7 @@ func (l Label) String() string {
// ListLabels gets all labels for given project.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md#list-labels
+// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html#list-labels
func (s *LabelsService) ListLabels(pid interface{}, options ...OptionFunc) ([]*Label, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -74,8 +71,7 @@ func (s *LabelsService) ListLabels(pid interface{}, options ...OptionFunc) ([]*L
// CreateLabelOptions represents the available CreateLabel() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md#create-a-new-label
+// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html#create-a-new-label
type CreateLabelOptions struct {
Name *string `url:"name,omitempty" json:"name,omitempty"`
Color *string `url:"color,omitempty" json:"color,omitempty"`
@@ -85,8 +81,7 @@ type CreateLabelOptions struct {
// CreateLabel creates a new label for given repository with given name and
// color.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md#create-a-new-label
+// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html#create-a-new-label
func (s *LabelsService) CreateLabel(pid interface{}, opt *CreateLabelOptions, options ...OptionFunc) (*Label, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -110,16 +105,14 @@ func (s *LabelsService) CreateLabel(pid interface{}, opt *CreateLabelOptions, op
// DeleteLabelOptions represents the available DeleteLabel() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md#delete-a-label
+// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html#delete-a-label
type DeleteLabelOptions struct {
Name *string `url:"name,omitempty" json:"name,omitempty"`
}
// DeleteLabel deletes a label given by its name.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md#delete-a-label
+// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html#delete-a-label
func (s *LabelsService) DeleteLabel(pid interface{}, opt *DeleteLabelOptions, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -137,8 +130,7 @@ func (s *LabelsService) DeleteLabel(pid interface{}, opt *DeleteLabelOptions, op
// UpdateLabelOptions represents the available UpdateLabel() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md#delete-a-label
+// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html#delete-a-label
type UpdateLabelOptions struct {
Name *string `url:"name,omitempty" json:"name,omitempty"`
NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"`
@@ -149,8 +141,7 @@ type UpdateLabelOptions struct {
// UpdateLabel updates an existing label with a new name or a new color. At least
// one parameter is required to update the label.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md#edit-an-existing-label
+// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html#edit-an-existing-label
func (s *LabelsService) UpdateLabel(pid interface{}, opt *UpdateLabelOptions, options ...OptionFunc) (*Label, *Response, error) {
project, err := parseID(pid)
if err != nil {
diff --git a/vendor/github.com/xanzy/go-gitlab/merge_requests.go b/vendor/github.com/xanzy/go-gitlab/merge_requests.go
index 34405d8..e284793 100644
--- a/vendor/github.com/xanzy/go-gitlab/merge_requests.go
+++ b/vendor/github.com/xanzy/go-gitlab/merge_requests.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -25,30 +25,29 @@ import (
// MergeRequestsService handles communication with the merge requests related
// methods of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/merge_requests.html
type MergeRequestsService struct {
- client *Client
+ client *Client
+ timeStats *timeStatsService
}
// MergeRequest represents a GitLab merge request.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/merge_requests.html
type MergeRequest struct {
- ID int `json:"id"`
- IID int `json:"iid"`
- ProjectID int `json:"project_id"`
- Title string `json:"title"`
- Description string `json:"description"`
- WorkInProgress bool `json:"work_in_progress"`
- State string `json:"state"`
- CreatedAt *time.Time `json:"created_at"`
- UpdatedAt *time.Time `json:"updated_at"`
- TargetBranch string `json:"target_branch"`
- SourceBranch string `json:"source_branch"`
- Upvotes int `json:"upvotes"`
- Downvotes int `json:"downvotes"`
+ ID int `json:"id"`
+ IID int `json:"iid"`
+ ProjectID int `json:"project_id"`
+ Title string `json:"title"`
+ Description string `json:"description"`
+ WorkInProgress bool `json:"work_in_progress"`
+ State string `json:"state"`
+ CreatedAt string `json:"created_at"`
+ UpdatedAt string `json:"updated_at"`
+ TargetBranch string `json:"target_branch"`
+ SourceBranch string `json:"source_branch"`
+ Upvotes int `json:"upvotes"`
+ Downvotes int `json:"downvotes"`
Author struct {
Name string `json:"name"`
Username string `json:"username"`
@@ -77,13 +76,14 @@ type MergeRequest struct {
UpdatedAt *time.Time `json:"updated_at"`
DueDate string `json:"due_date"`
} `json:"milestone"`
- MergeWhenBuildSucceeds bool `json:"merge_when_build_succeeds"`
- MergeStatus string `json:"merge_status"`
- Subscribed bool `json:"subscribed"`
- UserNotesCount int `json:"user_notes_count"`
- SouldRemoveSourceBranch bool `json:"should_remove_source_branch"`
- ForceRemoveSourceBranch bool `json:"force_remove_source_branch"`
- Changes []struct {
+ MergeWhenPipelineSucceeds bool `json:"merge_when_pipeline_succeeds"`
+ MergeStatus string `json:"merge_status"`
+ SHA string `json:"sha"`
+ Subscribed bool `json:"subscribed"`
+ UserNotesCount int `json:"user_notes_count"`
+ SouldRemoveSourceBranch bool `json:"should_remove_source_branch"`
+ ForceRemoveSourceBranch bool `json:"force_remove_source_branch"`
+ Changes []struct {
OldPath string `json:"old_path"`
NewPath string `json:"new_path"`
AMode string `json:"a_mode"`
@@ -100,27 +100,110 @@ func (m MergeRequest) String() string {
return Stringify(m)
}
+// MergeRequestApprovals represents GitLab merge request approvals.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#merge-request-approvals
+type MergeRequestApprovals struct {
+ ID int `json:"id"`
+ ProjectID int `json:"project_id"`
+ Title string `json:"title"`
+ Description string `json:"description"`
+ State string `json:"state"`
+ CreatedAt *time.Time `json:"created_at"`
+ UpdatedAt *time.Time `json:"updated_at"`
+ MergeStatus string `json:"merge_status"`
+ ApprovalsRequired int `json:"approvals_required"`
+ ApprovalsMissing int `json:"approvals_missing"`
+ ApprovedBy []struct {
+ User struct {
+ Name string `json:"name"`
+ Username string `json:"username"`
+ ID int `json:"id"`
+ State string `json:"state"`
+ AvatarURL string `json:"avatar_url"`
+ WebURL string `json:"web_url"`
+ } `json:"user"`
+ } `json:"approved_by"`
+}
+
+func (m MergeRequestApprovals) String() string {
+ return Stringify(m)
+}
+
// ListMergeRequestsOptions represents the available ListMergeRequests()
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#list-merge-requests
+// https://docs.gitlab.com/ce/api/merge_requests.html#list-merge-requests
type ListMergeRequestsOptions struct {
ListOptions
- IID *int `url:"iid,omitempty" json:"iid,omitempty"`
- State *string `url:"state,omitempty" json:"state,omitempty"`
- OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
- Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
+ State *string `url:"state,omitempty" json:"state,omitempty"`
+ OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+ Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
+ Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"`
+ View *string `url:"view,omitempty" json:"view,omitempty"`
+ Labels Labels `url:"labels,omitempty" json:"labels,omitempty"`
+ CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"`
+ CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"`
+ Scope *string `url:"scope,omitempty" json:"scope,omitempty"`
+ AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"`
+ AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
+ MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"`
+}
+
+// ListMergeRequests gets all merge requests. The state
+// parameter can be used to get only merge requests with a given state (opened,
+// closed, or merged) or all of them (all). The pagination parameters page and
+// per_page can be used to restrict the list of merge requests.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/merge_requests.html#list-merge-requests
+func (s *MergeRequestsService) ListMergeRequests(opt *ListMergeRequestsOptions, options ...OptionFunc) ([]*MergeRequest, *Response, error) {
+ req, err := s.client.NewRequest("GET", "merge_requests", opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var m []*MergeRequest
+ resp, err := s.client.Do(req, &m)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return m, resp, err
+}
+
+// ListProjectMergeRequestsOptions represents the available ListMergeRequests()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/merge_requests.html#list-project-merge-requests
+type ListProjectMergeRequestsOptions struct {
+ ListOptions
+ IIDs []int `url:"iids[],omitempty" json:"iids,omitempty"`
+ State *string `url:"state,omitempty" json:"state,omitempty"`
+ OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+ Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
+ Milestone *string `url:"milestone,omitempty" json:"milestone,omitempty"`
+ View *string `url:"view,omitempty" json:"view,omitempty"`
+ Labels Labels `url:"labels,omitempty" json:"labels,omitempty"`
+ CreatedAfter *time.Time `url:"created_after,omitempty" json:"created_after,omitempty"`
+ CreatedBefore *time.Time `url:"created_before,omitempty" json:"created_before,omitempty"`
+ Scope *string `url:"scope,omitempty" json:"scope,omitempty"`
+ AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"`
+ AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
+ MyReactionEmoji *string `url:"my_reaction_emoji,omitempty" json:"my_reaction_emoji,omitempty"`
}
-// ListMergeRequests gets all merge requests for this project. The state
+// ListProjectMergeRequests gets all merge requests for this project. The state
// parameter can be used to get only merge requests with a given state (opened,
// closed, or merged) or all of them (all). The pagination parameters page and
// per_page can be used to restrict the list of merge requests.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#list-merge-requests
-func (s *MergeRequestsService) ListMergeRequests(pid interface{}, opt *ListMergeRequestsOptions, options ...OptionFunc) ([]*MergeRequest, *Response, error) {
+// https://docs.gitlab.com/ce/api/merge_requests.html#list-merge-requests
+func (s *MergeRequestsService) ListProjectMergeRequests(pid interface{}, opt *ListProjectMergeRequestsOptions, options ...OptionFunc) ([]*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
@@ -144,7 +227,7 @@ func (s *MergeRequestsService) ListMergeRequests(pid interface{}, opt *ListMerge
// GetMergeRequest shows information about a single merge request.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#get-single-mr
+// https://docs.gitlab.com/ce/api/merge_requests.html#get-single-mr
func (s *MergeRequestsService) GetMergeRequest(pid interface{}, mergeRequest int, options ...OptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -166,10 +249,35 @@ func (s *MergeRequestsService) GetMergeRequest(pid interface{}, mergeRequest int
return m, resp, err
}
+// GetMergeRequestApprovals gets information about a merge request's approvals.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ee/api/merge_requests.html#merge-request-approvals
+func (s *MergeRequestsService) GetMergeRequestApprovals(pid interface{}, mergeRequest int, options ...OptionFunc) (*MergeRequestApprovals, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/merge_requests/%d/approvals", url.QueryEscape(project), mergeRequest)
+
+ req, err := s.client.NewRequest("GET", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ a := new(MergeRequestApprovals)
+ resp, err := s.client.Do(req, a)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return a, resp, err
+}
+
// GetMergeRequestCommits gets a list of merge request commits.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#get-single-mr-commits
+// https://docs.gitlab.com/ce/api/merge_requests.html#get-single-mr-commits
func (s *MergeRequestsService) GetMergeRequestCommits(pid interface{}, mergeRequest int, options ...OptionFunc) ([]*Commit, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -195,7 +303,7 @@ func (s *MergeRequestsService) GetMergeRequestCommits(pid interface{}, mergeRequ
// its files and changes.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#get-single-mr-changes
+// https://docs.gitlab.com/ce/api/merge_requests.html#get-single-mr-changes
func (s *MergeRequestsService) GetMergeRequestChanges(pid interface{}, mergeRequest int, options ...OptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -221,12 +329,12 @@ func (s *MergeRequestsService) GetMergeRequestChanges(pid interface{}, mergeRequ
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#create-mr
+// https://docs.gitlab.com/ce/api/merge_requests.html#create-mr
type CreateMergeRequestOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Description *string `url:"description,omitempty" json:"description,omitempty"`
- SourceBranch *string `url:"source_branch,omitemtpy" json:"source_branch,omitemtpy"`
- TargetBranch *string `url:"target_branch,omitemtpy" json:"target_branch,omitemtpy"`
+ SourceBranch *string `url:"source_branch,omitempty" json:"source_branch,omitempty"`
+ TargetBranch *string `url:"target_branch,omitempty" json:"target_branch,omitempty"`
AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
TargetProjectID *int `url:"target_project_id,omitempty" json:"target_project_id,omitempty"`
}
@@ -234,7 +342,7 @@ type CreateMergeRequestOptions struct {
// CreateMergeRequest creates a new merge request.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#create-mr
+// https://docs.gitlab.com/ce/api/merge_requests.html#create-mr
func (s *MergeRequestsService) CreateMergeRequest(pid interface{}, opt *CreateMergeRequestOptions, options ...OptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -260,19 +368,21 @@ func (s *MergeRequestsService) CreateMergeRequest(pid interface{}, opt *CreateMe
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#update-mr
+// https://docs.gitlab.com/ce/api/merge_requests.html#update-mr
type UpdateMergeRequestOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Description *string `url:"description,omitempty" json:"description,omitempty"`
- TargetBranch *string `url:"target_branch,omitemtpy" json:"target_branch,omitemtpy"`
+ TargetBranch *string `url:"target_branch,omitempty" json:"target_branch,omitempty"`
AssigneeID *int `url:"assignee_id,omitempty" json:"assignee_id,omitempty"`
+ Labels Labels `url:"labels,comma,omitempty" json:"labels,omitempty"`
+ MilestoneID *int `url:"milestone_id,omitempty" json:"milestone_id,omitempty"`
StateEvent *string `url:"state_event,omitempty" json:"state_event,omitempty"`
}
// UpdateMergeRequest updates an existing project merge request.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#update-mr
+// https://docs.gitlab.com/ce/api/merge_requests.html#update-mr
func (s *MergeRequestsService) UpdateMergeRequest(pid interface{}, mergeRequest int, opt *UpdateMergeRequestOptions, options ...OptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -298,12 +408,12 @@ func (s *MergeRequestsService) UpdateMergeRequest(pid interface{}, mergeRequest
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#accept-mr
+// https://docs.gitlab.com/ce/api/merge_requests.html#accept-mr
type AcceptMergeRequestOptions struct {
- MergeCommitMessage *string `url:"merge_commit_message,omitempty" json:"merge_commit_message,omitempty"`
- ShouldRemoveSourceBranch *bool `url:"should_remove_source_branch,omitempty" json:"should_remove_source_branch,omitempty"`
- MergeWhenBuildSucceeds *bool `url:"merge_when_build_succeeds,omitempty" json:"merge_when_build_succeeds,omitempty"`
- Sha *string `url:"sha,omitempty" json:"sha,omitempty"`
+ MergeCommitMessage *string `url:"merge_commit_message,omitempty" json:"merge_commit_message,omitempty"`
+ ShouldRemoveSourceBranch *bool `url:"should_remove_source_branch,omitempty" json:"should_remove_source_branch,omitempty"`
+ MergeWhenPipelineSucceeds *bool `url:"merge_when_pipeline_succeeds,omitempty" json:"merge_when_pipeline_succeeds,omitempty"`
+ Sha *string `url:"sha,omitempty" json:"sha,omitempty"`
}
// AcceptMergeRequest merges changes submitted with MR using this API. If merge
@@ -312,7 +422,7 @@ type AcceptMergeRequestOptions struct {
// already merged or closed - you get 405 and error message 'Method Not Allowed'
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#accept-mr
+// https://docs.gitlab.com/ce/api/merge_requests.html#accept-mr
func (s *MergeRequestsService) AcceptMergeRequest(pid interface{}, mergeRequest int, opt *AcceptMergeRequestOptions, options ...OptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -333,3 +443,43 @@ func (s *MergeRequestsService) AcceptMergeRequest(pid interface{}, mergeRequest
return m, resp, err
}
+
+// SetTimeEstimate sets the time estimate for a single project merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/merge_requests.html#set-a-time-estimate-for-a-merge-request
+func (s *MergeRequestsService) SetTimeEstimate(pid interface{}, mergeRequest int, opt *SetTimeEstimateOptions, options ...OptionFunc) (*TimeStats, *Response, error) {
+ return s.timeStats.setTimeEstimate(pid, "merge_requests", mergeRequest, opt, options...)
+}
+
+// ResetTimeEstimate resets the time estimate for a single project merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/merge_requests.html#reset-the-time-estimate-for-a-merge-request
+func (s *MergeRequestsService) ResetTimeEstimate(pid interface{}, mergeRequest int, options ...OptionFunc) (*TimeStats, *Response, error) {
+ return s.timeStats.resetTimeEstimate(pid, "merge_requests", mergeRequest, options...)
+}
+
+// AddSpentTime adds spent time for a single project merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/merge_requests.html#add-spent-time-for-a-merge-request
+func (s *MergeRequestsService) AddSpentTime(pid interface{}, mergeRequest int, opt *AddSpentTimeOptions, options ...OptionFunc) (*TimeStats, *Response, error) {
+ return s.timeStats.addSpentTime(pid, "merge_requests", mergeRequest, opt, options...)
+}
+
+// ResetSpentTime resets the spent time for a single project merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/merge_requests.html#reset-spent-time-for-a-merge-request
+func (s *MergeRequestsService) ResetSpentTime(pid interface{}, mergeRequest int, options ...OptionFunc) (*TimeStats, *Response, error) {
+ return s.timeStats.resetSpentTime(pid, "merge_requests", mergeRequest, options...)
+}
+
+// GetTimeSpent gets the spent time for a single project merge request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/merge_requests.html#get-time-tracking-stats
+func (s *MergeRequestsService) GetTimeSpent(pid interface{}, mergeRequest int, options ...OptionFunc) (*TimeStats, *Response, error) {
+ return s.timeStats.getTimeSpent(pid, "merge_requests", mergeRequest, options...)
+}
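
A minimal sketch of the new global ListMergeRequests endpoint and its filter options. The example is not part of the diff; the gitlab.NewClient constructor, the MergeRequests field on the client, the gitlab.String helper, and the "assigned-to-me" scope value (taken from the GitLab API documentation of that era) are assumptions.

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// Assumed client constructor and MergeRequests service field; both are
	// defined outside this diff.
	git := gitlab.NewClient(nil, "your-private-token")

	// List opened merge requests assigned to the authenticated user, using the
	// new global endpoint and its state/scope filters.
	opts := &gitlab.ListMergeRequestsOptions{
		State: gitlab.String("opened"),
		Scope: gitlab.String("assigned-to-me"),
	}
	mrs, _, err := git.MergeRequests.ListMergeRequests(opts)
	if err != nil {
		log.Fatal(err)
	}

	for _, mr := range mrs {
		log.Printf("!%d %s (%s)", mr.IID, mr.Title, mr.MergeStatus)
	}
}
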
diff --git a/vendor/github.com/xanzy/go-gitlab/milestones.go b/vendor/github.com/xanzy/go-gitlab/milestones.go
index 6c04d7b..2162455 100644
--- a/vendor/github.com/xanzy/go-gitlab/milestones.go
+++ b/vendor/github.com/xanzy/go-gitlab/milestones.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -25,16 +25,14 @@ import (
// MilestonesService handles communication with the milestone related methods
// of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/milestones.html
type MilestonesService struct {
client *Client
}
// Milestone represents a GitLab milestone.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/milestones.html
type Milestone struct {
ID int `json:"id"`
Iid int `json:"iid"`
@@ -55,16 +53,16 @@ func (m Milestone) String() string {
// ListMilestonesOptions represents the available ListMilestones() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#list-project-milestones
+// https://docs.gitlab.com/ce/api/milestones.html#list-project-milestones
type ListMilestonesOptions struct {
ListOptions
- IID *int `url:"iid,omitempty" json:"iid,omitempty"`
+ IIDs []int `url:"iids,omitempty" json:"iids,omitempty"`
}
// ListMilestones returns a list of project milestones.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#list-project-milestones
+// https://docs.gitlab.com/ce/api/milestones.html#list-project-milestones
func (s *MilestonesService) ListMilestones(pid interface{}, opt *ListMilestonesOptions, options ...OptionFunc) ([]*Milestone, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -89,7 +87,7 @@ func (s *MilestonesService) ListMilestones(pid interface{}, opt *ListMilestonesO
// GetMilestone gets a single project milestone.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#get-single-milestone
+// https://docs.gitlab.com/ce/api/milestones.html#get-single-milestone
func (s *MilestonesService) GetMilestone(pid interface{}, milestone int, options ...OptionFunc) (*Milestone, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -114,7 +112,7 @@ func (s *MilestonesService) GetMilestone(pid interface{}, milestone int, options
// CreateMilestoneOptions represents the available CreateMilestone() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#create-new-milestone
+// https://docs.gitlab.com/ce/api/milestones.html#create-new-milestone
type CreateMilestoneOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Description *string `url:"description,omitempty" json:"description,omitempty"`
@@ -125,7 +123,7 @@ type CreateMilestoneOptions struct {
// CreateMilestone creates a new project milestone.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#create-new-milestone
+// https://docs.gitlab.com/ce/api/milestones.html#create-new-milestone
func (s *MilestonesService) CreateMilestone(pid interface{}, opt *CreateMilestoneOptions, options ...OptionFunc) (*Milestone, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -150,7 +148,7 @@ func (s *MilestonesService) CreateMilestone(pid interface{}, opt *CreateMileston
// UpdateMilestoneOptions represents the available UpdateMilestone() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#edit-milestone
+// https://docs.gitlab.com/ce/api/milestones.html#edit-milestone
type UpdateMilestoneOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Description *string `url:"description,omitempty" json:"description,omitempty"`
@@ -162,7 +160,7 @@ type UpdateMilestoneOptions struct {
// UpdateMilestone updates an existing project milestone.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#edit-milestone
+// https://docs.gitlab.com/ce/api/milestones.html#edit-milestone
func (s *MilestonesService) UpdateMilestone(pid interface{}, milestone int, opt *UpdateMilestoneOptions, options ...OptionFunc) (*Milestone, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -187,7 +185,7 @@ func (s *MilestonesService) UpdateMilestone(pid interface{}, milestone int, opt
// GetMilestoneIssuesOptions represents the available GetMilestoneIssues() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#get-all-issues-assigned-to-a-single-milestone
+// https://docs.gitlab.com/ce/api/milestones.html#get-all-issues-assigned-to-a-single-milestone
type GetMilestoneIssuesOptions struct {
ListOptions
}
@@ -195,7 +193,7 @@ type GetMilestoneIssuesOptions struct {
// GetMilestoneIssues gets all issues assigned to a single project milestone.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#get-all-issues-assigned-to-a-single-milestone
+// https://docs.gitlab.com/ce/api/milestones.html#get-all-issues-assigned-to-a-single-milestone
func (s *MilestonesService) GetMilestoneIssues(pid interface{}, milestone int, opt *GetMilestoneIssuesOptions, options ...OptionFunc) ([]*Issue, *Response, error) {
project, err := parseID(pid)
if err != nil {
diff --git a/vendor/github.com/xanzy/go-gitlab/namespaces.go b/vendor/github.com/xanzy/go-gitlab/namespaces.go
index 090f61b..22ad7af 100644
--- a/vendor/github.com/xanzy/go-gitlab/namespaces.go
+++ b/vendor/github.com/xanzy/go-gitlab/namespaces.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -19,16 +19,14 @@ package gitlab
// NamespacesService handles communication with the namespace related methods
// of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/namespaces.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/namespaces.html
type NamespacesService struct {
client *Client
}
// Namespace represents a GitLab namespace.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/namespaces.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/namespaces.html
type Namespace struct {
ID int `json:"id"`
Path string `json:"path"`
@@ -41,8 +39,7 @@ func (n Namespace) String() string {
// ListNamespacesOptions represents the available ListNamespaces() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/namespaces.md#list-namespaces
+// GitLab API docs: https://docs.gitlab.com/ce/api/namespaces.html#list-namespaces
type ListNamespacesOptions struct {
ListOptions
Search *string `url:"search,omitempty" json:"search,omitempty"`
@@ -50,8 +47,7 @@ type ListNamespacesOptions struct {
// ListNamespaces gets a list of projects accessible by the authenticated user.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/namespaces.md#list-namespaces
+// GitLab API docs: https://docs.gitlab.com/ce/api/namespaces.html#list-namespaces
func (s *NamespacesService) ListNamespaces(opt *ListNamespacesOptions, options ...OptionFunc) ([]*Namespace, *Response, error) {
req, err := s.client.NewRequest("GET", "namespaces", opt, options)
if err != nil {
@@ -71,7 +67,7 @@ func (s *NamespacesService) ListNamespaces(opt *ListNamespacesOptions, options .
// or path.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/namespaces.md#search-for-namespace
+// https://docs.gitlab.com/ce/api/namespaces.html#search-for-namespace
func (s *NamespacesService) SearchNamespace(query string, options ...OptionFunc) ([]*Namespace, *Response, error) {
var q struct {
Search string `url:"search,omitempty" json:"search,omitempty"`
diff --git a/vendor/github.com/xanzy/go-gitlab/notes.go b/vendor/github.com/xanzy/go-gitlab/notes.go
index 2f11df4..b8e9f32 100644
--- a/vendor/github.com/xanzy/go-gitlab/notes.go
+++ b/vendor/github.com/xanzy/go-gitlab/notes.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -25,16 +25,14 @@ import (
// NotesService handles communication with the notes related methods
// of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/notes.html
type NotesService struct {
client *Client
}
// Note represents a GitLab note.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/notes.html
type Note struct {
ID int `json:"id"`
Body string `json:"body"`
@@ -49,6 +47,7 @@ type Note struct {
State string `json:"state"`
CreatedAt *time.Time `json:"created_at"`
} `json:"author"`
+ System bool `json:"system"`
ExpiresAt *time.Time `json:"expires_at"`
UpdatedAt *time.Time `json:"updated_at"`
CreatedAt *time.Time `json:"created_at"`
@@ -61,7 +60,7 @@ func (n Note) String() string {
// ListIssueNotesOptions represents the available ListIssueNotes() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#list-project-issue-notes
+// https://docs.gitlab.com/ce/api/notes.html#list-project-issue-notes
type ListIssueNotesOptions struct {
ListOptions
}
@@ -69,7 +68,7 @@ type ListIssueNotesOptions struct {
// ListIssueNotes gets a list of all notes for a single issue.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#list-project-issue-notes
+// https://docs.gitlab.com/ce/api/notes.html#list-project-issue-notes
func (s *NotesService) ListIssueNotes(pid interface{}, issue int, opt *ListIssueNotesOptions, options ...OptionFunc) ([]*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -94,8 +93,8 @@ func (s *NotesService) ListIssueNotes(pid interface{}, issue int, opt *ListIssue
// GetIssueNote returns a single note for a specific project issue.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#get-single-issue-note
-func (s *NotesService) GetIssueNote(pid interface{}, issue int, note int, options ...OptionFunc) (*Note, *Response, error) {
+// https://docs.gitlab.com/ce/api/notes.html#get-single-issue-note
+func (s *NotesService) GetIssueNote(pid interface{}, issue, note int, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
@@ -120,7 +119,7 @@ func (s *NotesService) GetIssueNote(pid interface{}, issue int, note int, option
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#create-new-issue-note
+// https://docs.gitlab.com/ce/api/notes.html#create-new-issue-note
type CreateIssueNoteOptions struct {
Body *string `url:"body,omitempty" json:"body,omitempty"`
}
@@ -128,7 +127,7 @@ type CreateIssueNoteOptions struct {
// CreateIssueNote creates a new note to a single project issue.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#create-new-issue-note
+// https://docs.gitlab.com/ce/api/notes.html#create-new-issue-note
func (s *NotesService) CreateIssueNote(pid interface{}, issue int, opt *CreateIssueNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -154,15 +153,15 @@ func (s *NotesService) CreateIssueNote(pid interface{}, issue int, opt *CreateIs
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#modify-existing-issue-note
+// https://docs.gitlab.com/ce/api/notes.html#modify-existing-issue-note
type UpdateIssueNoteOptions struct {
Body *string `url:"body,omitempty" json:"body,omitempty"`
}
// UpdateIssueNote modifies existing note of an issue.
//
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#modify-existing-issue-note
-func (s *NotesService) UpdateIssueNote(pid interface{}, issue int, note int, opt *UpdateIssueNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
+// https://docs.gitlab.com/ce/api/notes.html#modify-existing-issue-note
+func (s *NotesService) UpdateIssueNote(pid interface{}, issue, note int, opt *UpdateIssueNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
@@ -183,11 +182,29 @@ func (s *NotesService) UpdateIssueNote(pid interface{}, issue int, note int, opt
return n, resp, err
}
+// DeleteIssueNote deletes an existing note of an issue.
+//
+// https://docs.gitlab.com/ce/api/notes.html#delete-an-issue-note
+func (s *NotesService) DeleteIssueNote(pid interface{}, issue, note int, options ...OptionFunc) (*Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf("projects/%s/issues/%d/notes/%d", url.QueryEscape(project), issue, note)
+
+ req, err := s.client.NewRequest("DELETE", u, nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
// ListSnippetNotes gets a list of all notes for a single snippet. Snippet
// notes are comments users can post to a snippet.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#list-all-snippet-notes
+// https://docs.gitlab.com/ce/api/notes.html#list-all-snippet-notes
func (s *NotesService) ListSnippetNotes(pid interface{}, snippet int, options ...OptionFunc) ([]*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -212,8 +229,8 @@ func (s *NotesService) ListSnippetNotes(pid interface{}, snippet int, options ..
// GetSnippetNote returns a single note for a given snippet.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#get-single-snippet-note
-func (s *NotesService) GetSnippetNote(pid interface{}, snippet int, note int, options ...OptionFunc) (*Note, *Response, error) {
+// https://docs.gitlab.com/ce/api/notes.html#get-single-snippet-note
+func (s *NotesService) GetSnippetNote(pid interface{}, snippet, note int, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
@@ -238,7 +255,7 @@ func (s *NotesService) GetSnippetNote(pid interface{}, snippet int, note int, op
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#create-new-snippet-note
+// https://docs.gitlab.com/ce/api/notes.html#create-new-snippet-note
type CreateSnippetNoteOptions struct {
Body *string `url:"body,omitempty" json:"body,omitempty"`
}
@@ -247,7 +264,7 @@ type CreateSnippetNoteOptions struct {
// comments users can post to a snippet.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#create-new-snippet-note
+// https://docs.gitlab.com/ce/api/notes.html#create-new-snippet-note
func (s *NotesService) CreateSnippetNote(pid interface{}, snippet int, opt *CreateSnippetNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -273,15 +290,15 @@ func (s *NotesService) CreateSnippetNote(pid interface{}, snippet int, opt *Crea
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#modify-existing-snippet-note
+// https://docs.gitlab.com/ce/api/notes.html#modify-existing-snippet-note
type UpdateSnippetNoteOptions struct {
Body *string `url:"body,omitempty" json:"body,omitempty"`
}
// UpdateSnippetNote modifies existing note of a snippet.
//
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#modify-existing-snippet-note
-func (s *NotesService) UpdateSnippetNote(pid interface{}, snippet int, note int, opt *UpdateSnippetNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
+// https://docs.gitlab.com/ce/api/notes.html#modify-existing-snippet-note
+func (s *NotesService) UpdateSnippetNote(pid interface{}, snippet, note int, opt *UpdateSnippetNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
@@ -302,10 +319,28 @@ func (s *NotesService) UpdateSnippetNote(pid interface{}, snippet int, note int,
return n, resp, err
}
+// DeleteSnippetNote deletes an existing note of a snippet.
+//
+// https://docs.gitlab.com/ce/api/notes.html#delete-a-snippet-note
+func (s *NotesService) DeleteSnippetNote(pid interface{}, snippet, note int, options ...OptionFunc) (*Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf("projects/%s/snippets/%d/notes/%d", url.QueryEscape(project), snippet, note)
+
+ req, err := s.client.NewRequest("DELETE", u, nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
// ListMergeRequestNotes gets a list of all notes for a single merge request.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#list-all-merge-request-notes
+// https://docs.gitlab.com/ce/api/notes.html#list-all-merge-request-notes
func (s *NotesService) ListMergeRequestNotes(pid interface{}, mergeRequest int, options ...OptionFunc) ([]*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -330,8 +365,8 @@ func (s *NotesService) ListMergeRequestNotes(pid interface{}, mergeRequest int,
// GetMergeRequestNote returns a single note for a given merge request.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#get-single-merge-request-note
-func (s *NotesService) GetMergeRequestNote(pid interface{}, mergeRequest int, note int, options ...OptionFunc) (*Note, *Response, error) {
+// https://docs.gitlab.com/ce/api/notes.html#get-single-merge-request-note
+func (s *NotesService) GetMergeRequestNote(pid interface{}, mergeRequest, note int, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
@@ -356,7 +391,7 @@ func (s *NotesService) GetMergeRequestNote(pid interface{}, mergeRequest int, no
// CreateMergeRequestNote() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#create-new-merge-request-note
+// https://docs.gitlab.com/ce/api/notes.html#create-new-merge-request-note
type CreateMergeRequestNoteOptions struct {
Body *string `url:"body,omitempty" json:"body,omitempty"`
}
@@ -364,7 +399,7 @@ type CreateMergeRequestNoteOptions struct {
// CreateMergeRequestNote creates a new note for a single merge request.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#create-new-merge-request-note
+// https://docs.gitlab.com/ce/api/notes.html#create-new-merge-request-note
func (s *NotesService) CreateMergeRequestNote(pid interface{}, mergeRequest int, opt *CreateMergeRequestNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -390,15 +425,15 @@ func (s *NotesService) CreateMergeRequestNote(pid interface{}, mergeRequest int,
// UpdateMergeRequestNote() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#modify-existing-merge-request-note
+// https://docs.gitlab.com/ce/api/notes.html#modify-existing-merge-request-note
type UpdateMergeRequestNoteOptions struct {
Body *string `url:"body,omitempty" json:"body,omitempty"`
}
// UpdateMergeRequestNote modifies existing note of a merge request.
//
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#modify-existing-merge-request-note
-func (s *NotesService) UpdateMergeRequestNote(pid interface{}, mergeRequest int, note int, opt *UpdateMergeRequestNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
+// https://docs.gitlab.com/ce/api/notes.html#modify-existing-merge-request-note
+func (s *NotesService) UpdateMergeRequestNote(pid interface{}, mergeRequest, note int, opt *UpdateMergeRequestNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
@@ -418,3 +453,22 @@ func (s *NotesService) UpdateMergeRequestNote(pid interface{}, mergeRequest int,
return n, resp, err
}
+
+// DeleteMergeRequestNote deletes an existing note of a merge request.
+//
+// https://docs.gitlab.com/ce/api/notes.html#delete-a-merge-request-note
+func (s *NotesService) DeleteMergeRequestNote(pid interface{}, mergeRequest, note int, options ...OptionFunc) (*Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf(
+ "projects/%s/merge_requests/%d/notes/%d", url.QueryEscape(project), mergeRequest, note)
+
+ req, err := s.client.NewRequest("DELETE", u, nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
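
A minimal sketch of the new delete helpers on NotesService. It is illustrative only; the gitlab.NewClient constructor and the Notes field on the client are assumptions from the rest of the package.

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// Assumed client constructor and Notes service field; both are defined
	// outside this diff.
	git := gitlab.NewClient(nil, "your-private-token")

	// Remove note 7 from issue 42 of project "group/project" using the new
	// DeleteIssueNote helper.
	if _, err := git.Notes.DeleteIssueNote("group/project", 42, 7); err != nil {
		log.Fatal(err)
	}
	log.Println("issue note deleted")
}
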
diff --git a/vendor/github.com/xanzy/go-gitlab/notifications.go b/vendor/github.com/xanzy/go-gitlab/notifications.go
index 544409c..a5501dd 100644
--- a/vendor/github.com/xanzy/go-gitlab/notifications.go
+++ b/vendor/github.com/xanzy/go-gitlab/notifications.go
@@ -9,8 +9,7 @@ import (
// NotificationSettingsService handles communication with the notification settings
// related methods of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/notification_settings.html
type NotificationSettingsService struct {
client *Client
}
@@ -18,17 +17,17 @@ type NotificationSettingsService struct {
// NotificationSettings represents the Gitlab notification setting.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#notification-settings
+// https://docs.gitlab.com/ce/api/notification_settings.html#notification-settings
type NotificationSettings struct {
Level NotificationLevelValue `json:"level"`
NotificationEmail string `json:"notification_email"`
Events *NotificationEvents `json:"events"`
}
-// NotificationEvents represents the avialable notification setting events.
+// NotificationEvents represents the available notification setting events.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#notification-settings
+// https://docs.gitlab.com/ce/api/notification_settings.html#notification-settings
type NotificationEvents struct {
CloseIssue bool `json:"close_issue"`
CloseMergeRequest bool `json:"close_merge_request"`
@@ -51,7 +50,7 @@ func (ns NotificationSettings) String() string {
// GetGlobalSettings returns current notification settings and email address.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#global-notification-settings
+// https://docs.gitlab.com/ce/api/notification_settings.html#global-notification-settings
func (s *NotificationSettingsService) GetGlobalSettings(options ...OptionFunc) (*NotificationSettings, *Response, error) {
u := "notification_settings"
@@ -91,7 +90,7 @@ type NotificationSettingsOptions struct {
// UpdateGlobalSettings updates current notification settings and email address.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#update-global-notification-settings
+// https://docs.gitlab.com/ce/api/notification_settings.html#update-global-notification-settings
func (s *NotificationSettingsService) UpdateGlobalSettings(opt *NotificationSettingsOptions, options ...OptionFunc) (*NotificationSettings, *Response, error) {
if opt.Level != nil && *opt.Level == GlobalNotificationLevel {
return nil, nil, errors.New(
@@ -117,7 +116,7 @@ func (s *NotificationSettingsService) UpdateGlobalSettings(opt *NotificationSett
// GetSettingsForGroup returns current group notification settings.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#group-project-level-notification-settings
+// https://docs.gitlab.com/ce/api/notification_settings.html#group-project-level-notification-settings
func (s *NotificationSettingsService) GetSettingsForGroup(gid interface{}, options ...OptionFunc) (*NotificationSettings, *Response, error) {
group, err := parseID(gid)
if err != nil {
@@ -142,7 +141,7 @@ func (s *NotificationSettingsService) GetSettingsForGroup(gid interface{}, optio
// GetSettingsForProject returns current project notification settings.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#group-project-level-notification-settings
+// https://docs.gitlab.com/ce/api/notification_settings.html#group-project-level-notification-settings
func (s *NotificationSettingsService) GetSettingsForProject(pid interface{}, options ...OptionFunc) (*NotificationSettings, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -167,7 +166,7 @@ func (s *NotificationSettingsService) GetSettingsForProject(pid interface{}, opt
// UpdateSettingsForGroup updates current group notification settings.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#update-group-project-level-notification-settings
+// https://docs.gitlab.com/ce/api/notification_settings.html#update-group-project-level-notification-settings
func (s *NotificationSettingsService) UpdateSettingsForGroup(gid interface{}, opt *NotificationSettingsOptions, options ...OptionFunc) (*NotificationSettings, *Response, error) {
group, err := parseID(gid)
if err != nil {
@@ -192,7 +191,7 @@ func (s *NotificationSettingsService) UpdateSettingsForGroup(gid interface{}, op
// UpdateSettingsForProject updates current project notification settings.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#update-group-project-level-notification-settings
+// https://docs.gitlab.com/ce/api/notification_settings.html#update-group-project-level-notification-settings
func (s *NotificationSettingsService) UpdateSettingsForProject(pid interface{}, opt *NotificationSettingsOptions, options ...OptionFunc) (*NotificationSettings, *Response, error) {
project, err := parseID(pid)
if err != nil {
diff --git a/vendor/github.com/xanzy/go-gitlab/pipeline_triggers.go b/vendor/github.com/xanzy/go-gitlab/pipeline_triggers.go
new file mode 100644
index 0000000..bed82f3
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/pipeline_triggers.go
@@ -0,0 +1,234 @@
+package gitlab
+
+import (
+ "fmt"
+ "net/url"
+ "time"
+)
+
+// PipelineTriggersService handles Project pipeline triggers.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/pipeline_triggers.html
+type PipelineTriggersService struct {
+ client *Client
+}
+
+// PipelineTrigger represents a project pipeline trigger.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/pipeline_triggers.html#pipeline-triggers
+type PipelineTrigger struct {
+ ID int `json:"id"`
+ Description string `json:"description"`
+ CreatedAt *time.Time `json:"created_at"`
+ DeletedAt *time.Time `json:"deleted_at"`
+ LastUsed *time.Time `json:"last_used"`
+ Token string `json:"token"`
+ UpdatedAt *time.Time `json:"updated_at"`
+ Owner *User `json:"owner"`
+}
+
+// ListPipelineTriggersOptions represents the available ListPipelineTriggers() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/pipeline_triggers.html#list-project-triggers
+type ListPipelineTriggersOptions struct {
+ ListOptions
+}
+
+// ListPipelineTriggers gets a list of project triggers.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/pipeline_triggers.html#list-project-triggers
+func (s *PipelineTriggersService) ListPipelineTriggers(pid interface{}, opt *ListPipelineTriggersOptions, options ...OptionFunc) ([]*PipelineTrigger, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/triggers", url.QueryEscape(project))
+
+ req, err := s.client.NewRequest("GET", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var pt []*PipelineTrigger
+ resp, err := s.client.Do(req, &pt)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return pt, resp, err
+}
+
+// GetPipelineTrigger gets a specific pipeline trigger for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/pipeline_triggers.html#get-trigger-details
+func (s *PipelineTriggersService) GetPipelineTrigger(pid interface{}, trigger int, options ...OptionFunc) (*PipelineTrigger, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/triggers/%d", url.QueryEscape(project), trigger)
+
+ req, err := s.client.NewRequest("GET", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pt := new(PipelineTrigger)
+ resp, err := s.client.Do(req, pt)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return pt, resp, err
+}
+
+// AddPipelineTriggerOptions represents the available AddPipelineTrigger() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/pipeline_triggers.html#create-a-project-trigger
+type AddPipelineTriggerOptions struct {
+ Description *string `url:"description,omitempty" json:"description,omitempty"`
+}
+
+// AddPipelineTrigger adds a pipeline trigger to a specified project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/pipeline_triggers.html#create-a-project-trigger
+func (s *PipelineTriggersService) AddPipelineTrigger(pid interface{}, opt *AddPipelineTriggerOptions, options ...OptionFunc) (*PipelineTrigger, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/triggers", url.QueryEscape(project))
+
+ req, err := s.client.NewRequest("POST", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pt := new(PipelineTrigger)
+ resp, err := s.client.Do(req, pt)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return pt, resp, err
+}
+
+// EditPipelineTriggerOptions represents the available EditPipelineTrigger() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/pipeline_triggers.html#update-a-project-trigger
+type EditPipelineTriggerOptions struct {
+ Description *string `url:"description,omitempty" json:"description,omitempty"`
+}
+
+// EditPipelineTrigger edits a trigger for a specified project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/pipeline_triggers.html#update-a-project-trigger
+func (s *PipelineTriggersService) EditPipelineTrigger(pid interface{}, trigger int, opt *EditPipelineTriggerOptions, options ...OptionFunc) (*PipelineTrigger, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/triggers/%d", url.QueryEscape(project), trigger)
+
+ req, err := s.client.NewRequest("PUT", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pt := new(PipelineTrigger)
+ resp, err := s.client.Do(req, pt)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return pt, resp, err
+}
+
+// TakeOwnershipOfPipelineTrigger sets the owner of the specified
+// pipeline trigger to the user issuing the request.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/pipeline_triggers.html#take-ownership-of-a-project-trigger
+func (s *PipelineTriggersService) TakeOwnershipOfPipelineTrigger(pid interface{}, trigger int, options ...OptionFunc) (*PipelineTrigger, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/triggers/%d/take_ownership", url.QueryEscape(project), trigger)
+
+ req, err := s.client.NewRequest("POST", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pt := new(PipelineTrigger)
+ resp, err := s.client.Do(req, pt)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return pt, resp, err
+}
+
+// DeletePipelineTrigger removes a trigger from a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/pipeline_triggers.html#remove-a-project-trigger
+func (s *PipelineTriggersService) DeletePipelineTrigger(pid interface{}, trigger int, options ...OptionFunc) (*Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf("projects/%s/triggers/%d", url.QueryEscape(project), trigger)
+
+ req, err := s.client.NewRequest("DELETE", u, nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// RunPipelineTriggerOptions represents the available RunPipelineTrigger() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/ci/triggers/README.html#triggering-a-pipeline
+type RunPipelineTriggerOptions struct {
+ Ref *string `url:"ref" json:"ref"`
+ Token *string `url:"token" json:"token"`
+ Variables map[string]string `url:"variables,omitempty" json:"variables,omitempty"`
+}
+
+// RunPipelineTrigger starts a pipeline for a project using a trigger token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/ci/triggers/README.html#triggering-a-pipeline
+func (s *PipelineTriggersService) RunPipelineTrigger(pid interface{}, opt *RunPipelineTriggerOptions, options ...OptionFunc) (*Pipeline, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/trigger/pipeline", url.QueryEscape(project))
+
+ req, err := s.client.NewRequest("POST", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pt := new(Pipeline)
+ resp, err := s.client.Do(req, pt)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return pt, resp, err
+}
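
To illustrate the new trigger API end to end: a minimal sketch that creates a trigger and then runs it, assuming the client wires the service up as client.PipelineTriggers (field name assumed by analogy with the other services); the project ID, description, ref and variables are placeholders.

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client := gitlab.NewClient(nil, "YOUR_PRIVATE_TOKEN") // assumed constructor

	// Create a trigger for placeholder project 1234.
	desc := "deploy trigger"
	trigger, _, err := client.PipelineTriggers.AddPipelineTrigger(1234, &gitlab.AddPipelineTriggerOptions{
		Description: &desc,
	})
	if err != nil {
		log.Fatalf("adding trigger: %v", err)
	}

	// Run the trigger against a branch, passing one pipeline variable.
	ref := "master"
	pipeline, _, err := client.PipelineTriggers.RunPipelineTrigger(1234, &gitlab.RunPipelineTriggerOptions{
		Ref:       &ref,
		Token:     &trigger.Token,
		Variables: map[string]string{"DEPLOY_ENV": "staging"}, // placeholder variable
	})
	if err != nil {
		log.Fatalf("running trigger: %v", err)
	}
	log.Printf("started pipeline %d (%s)", pipeline.ID, pipeline.Status)
}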
diff --git a/vendor/github.com/xanzy/go-gitlab/pipelines.go b/vendor/github.com/xanzy/go-gitlab/pipelines.go
index 6faceb9..5e88dad 100644
--- a/vendor/github.com/xanzy/go-gitlab/pipelines.go
+++ b/vendor/github.com/xanzy/go-gitlab/pipelines.go
@@ -25,16 +25,14 @@ import (
// PipelinesService handles communication with the repositories related
// methods of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html
type PipelinesService struct {
client *Client
}
// Pipeline represents a GitLab pipeline.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html
type Pipeline struct {
ID int `json:"id"`
Status string `json:"status"`
@@ -64,11 +62,24 @@ func (i Pipeline) String() string {
return Stringify(i)
}
+// PipelineList represents a list of GitLab project pipelines.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html#list-project-pipelines
+type PipelineList []struct {
+ ID int `json:"id"`
+ Status string `json:"status"`
+ Ref string `json:"ref"`
+ Sha string `json:"sha"`
+}
+
+func (i PipelineList) String() string {
+ return Stringify(i)
+}
+
// ListProjectPipelines gets a list of project pipelines.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md#list-project-pipelines
-func (s *PipelinesService) ListProjectPipelines(pid interface{}, options ...OptionFunc) ([]*Pipeline, *Response, error) {
+// GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html#list-project-pipelines
+func (s *PipelinesService) ListProjectPipelines(pid interface{}, options ...OptionFunc) (PipelineList, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
@@ -80,7 +91,7 @@ func (s *PipelinesService) ListProjectPipelines(pid interface{}, options ...Opti
return nil, nil, err
}
- var p []*Pipeline
+ var p PipelineList
resp, err := s.client.Do(req, &p)
if err != nil {
return nil, resp, err
@@ -90,8 +101,7 @@ func (s *PipelinesService) ListProjectPipelines(pid interface{}, options ...Opti
// GetPipeline gets a single project pipeline.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md#get-a-single-pipeline
+// GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html#get-a-single-pipeline
func (s *PipelinesService) GetPipeline(pid interface{}, pipeline int, options ...OptionFunc) (*Pipeline, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -115,16 +125,14 @@ func (s *PipelinesService) GetPipeline(pid interface{}, pipeline int, options ..
// CreatePipelineOptions represents the available CreatePipeline() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md#create-a-new-pipeline
+// GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html#create-a-new-pipeline
type CreatePipelineOptions struct {
Ref *string `url:"ref,omitempty" json:"ref"`
}
// CreatePipeline creates a new project pipeline.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md#create-a-new-pipeline
+// GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html#create-a-new-pipeline
func (s *PipelinesService) CreatePipeline(pid interface{}, opt *CreatePipelineOptions, options ...OptionFunc) (*Pipeline, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -149,7 +157,7 @@ func (s *PipelinesService) CreatePipeline(pid interface{}, opt *CreatePipelineOp
// RetryPipelineBuild retries failed builds in a pipeline
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md#retry-failed-builds-in-a-pipeline
+// https://docs.gitlab.com/ce/api/pipelines.html#retry-failed-builds-in-a-pipeline
func (s *PipelinesService) RetryPipelineBuild(pid interface{}, pipelineID int, options ...OptionFunc) (*Pipeline, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -174,7 +182,7 @@ func (s *PipelinesService) RetryPipelineBuild(pid interface{}, pipelineID int, o
// CancelPipelineBuild cancels a pipeline's builds
//
// GitLab API docs:
-//https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md#cancel-a-pipelines-builds
+// https://docs.gitlab.com/ce/api/pipelines.html#cancel-a-pipelines-builds
func (s *PipelinesService) CancelPipelineBuild(pid interface{}, pipelineID int, options ...OptionFunc) (*Pipeline, *Response, error) {
project, err := parseID(pid)
if err != nil {
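
Since ListProjectPipelines now returns the lighter PipelineList type instead of []*Pipeline, a minimal sketch of listing pipelines and fetching the full record for each entry, assuming the service is exposed as client.Pipelines; the project ID is a placeholder.

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client := gitlab.NewClient(nil, "YOUR_PRIVATE_TOKEN") // assumed constructor

	// PipelineList carries only id, status, ref and sha per entry.
	pipelines, _, err := client.Pipelines.ListProjectPipelines(1234)
	if err != nil {
		log.Fatalf("listing pipelines: %v", err)
	}
	for _, p := range pipelines {
		// Fetch the full Pipeline record for each summary entry.
		full, _, err := client.Pipelines.GetPipeline(1234, p.ID)
		if err != nil {
			log.Fatalf("getting pipeline %d: %v", p.ID, err)
		}
		log.Printf("pipeline %d on %s: %s", full.ID, p.Ref, full.Status)
	}
}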
diff --git a/vendor/github.com/xanzy/go-gitlab/project_members.go b/vendor/github.com/xanzy/go-gitlab/project_members.go
new file mode 100644
index 0000000..70beb72
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/project_members.go
@@ -0,0 +1,179 @@
+//
+// Copyright 2017, Sander van Harmelen
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+import (
+ "fmt"
+ "net/url"
+)
+
+// ProjectMembersService handles communication with the project members
+// related methods of the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/members.html
+type ProjectMembersService struct {
+ client *Client
+}
+
+// ListProjectMembersOptions represents the available ListProjectMembers()
+// options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/members.html#list-all-members-of-a-group-or-project
+type ListProjectMembersOptions struct {
+ ListOptions
+ Query *string `url:"query,omitempty" json:"query,omitempty"`
+}
+
+// ListProjectMembers gets a list of a project's team members.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/members.html#list-all-members-of-a-group-or-project
+func (s *ProjectMembersService) ListProjectMembers(pid interface{}, opt *ListProjectMembersOptions, options ...OptionFunc) ([]*ProjectMember, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/members", url.QueryEscape(project))
+
+ req, err := s.client.NewRequest("GET", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var pm []*ProjectMember
+ resp, err := s.client.Do(req, &pm)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return pm, resp, err
+}
+
+// GetProjectMember gets a project team member.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/members.html#get-a-member-of-a-group-or-project
+func (s *ProjectMembersService) GetProjectMember(pid interface{}, user int, options ...OptionFunc) (*ProjectMember, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/members/%d", url.QueryEscape(project), user)
+
+ req, err := s.client.NewRequest("GET", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pm := new(ProjectMember)
+ resp, err := s.client.Do(req, pm)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return pm, resp, err
+}
+
+// AddProjectMemberOptions represents the available AddProjectMember() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/members.html#add-a-member-to-a-group-or-project
+type AddProjectMemberOptions struct {
+ UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"`
+ AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+}
+
+// AddProjectMember adds a user to a project team. This is an idempotent
+// method and can be called multiple times with the same parameters. Adding
+// team membership to a user that is already a member does not affect the
+// existing membership.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/members.html#add-a-member-to-a-group-or-project
+func (s *ProjectMembersService) AddProjectMember(pid interface{}, opt *AddProjectMemberOptions, options ...OptionFunc) (*ProjectMember, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/members", url.QueryEscape(project))
+
+ req, err := s.client.NewRequest("POST", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pm := new(ProjectMember)
+ resp, err := s.client.Do(req, pm)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return pm, resp, err
+}
+
+// EditProjectMemberOptions represents the available EditProjectMember() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/members.html#edit-a-member-of-a-group-or-project
+type EditProjectMemberOptions struct {
+ AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+}
+
+// EditProjectMember updates a project team member to a specified access level.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/members.html#edit-a-member-of-a-group-or-project
+func (s *ProjectMembersService) EditProjectMember(pid interface{}, user int, opt *EditProjectMemberOptions, options ...OptionFunc) (*ProjectMember, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/members/%d", url.QueryEscape(project), user)
+
+ req, err := s.client.NewRequest("PUT", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ pm := new(ProjectMember)
+ resp, err := s.client.Do(req, pm)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return pm, resp, err
+}
+
+// DeleteProjectMember removes a user from a project team.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/members.html#remove-a-member-from-a-group-or-project
+func (s *ProjectMembersService) DeleteProjectMember(pid interface{}, user int, options ...OptionFunc) (*Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf("projects/%s/members/%d", url.QueryEscape(project), user)
+
+ req, err := s.client.NewRequest("DELETE", u, nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
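
As a usage illustration of the project members service split out here: a minimal sketch that adds a member and then raises its access level, assuming the client exposes the service as client.ProjectMembers; the project ID, user ID and the numeric access levels (30 developer, 40 master) are placeholders, with raw values used to avoid assuming named constants.

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client := gitlab.NewClient(nil, "YOUR_PRIVATE_TOKEN") // assumed constructor

	userID := 42                         // placeholder user ID
	level := gitlab.AccessLevelValue(30) // developer access, expressed as a raw value

	member, _, err := client.ProjectMembers.AddProjectMember(1234, &gitlab.AddProjectMemberOptions{
		UserID:      &userID,
		AccessLevel: &level,
	})
	if err != nil {
		log.Fatalf("adding member: %v", err)
	}
	log.Printf("added %s at level %d", member.Username, member.AccessLevel)

	// Raise the same member to access level 40 (master) via EditProjectMember.
	level = gitlab.AccessLevelValue(40)
	if _, _, err := client.ProjectMembers.EditProjectMember(1234, userID, &gitlab.EditProjectMemberOptions{
		AccessLevel: &level,
	}); err != nil {
		log.Fatalf("editing member: %v", err)
	}
}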
diff --git a/vendor/github.com/xanzy/go-gitlab/project_snippets.go b/vendor/github.com/xanzy/go-gitlab/project_snippets.go
index e58a6a8..8ebad47 100644
--- a/vendor/github.com/xanzy/go-gitlab/project_snippets.go
+++ b/vendor/github.com/xanzy/go-gitlab/project_snippets.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -26,16 +26,14 @@ import (
// ProjectSnippetsService handles communication with the project snippets
// related methods of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/project_snippets.html
type ProjectSnippetsService struct {
client *Client
}
// Snippet represents a GitLab project snippet.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/project_snippets.html
type Snippet struct {
ID int `json:"id"`
Title string `json:"title"`
@@ -48,7 +46,6 @@ type Snippet struct {
State string `json:"state"`
CreatedAt *time.Time `json:"created_at"`
} `json:"author"`
- ExpiresAt *time.Time `json:"expires_at"`
UpdatedAt *time.Time `json:"updated_at"`
CreatedAt *time.Time `json:"created_at"`
}
@@ -59,16 +56,14 @@ func (s Snippet) String() string {
// ListSnippetsOptions represents the available ListSnippets() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#list-snippets
+// GitLab API docs: https://docs.gitlab.com/ce/api/project_snippets.html#list-snippets
type ListSnippetsOptions struct {
ListOptions
}
// ListSnippets gets a list of project snippets.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#list-snippets
+// GitLab API docs: https://docs.gitlab.com/ce/api/project_snippets.html#list-snippets
func (s *ProjectSnippetsService) ListSnippets(pid interface{}, opt *ListSnippetsOptions, options ...OptionFunc) ([]*Snippet, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -93,7 +88,7 @@ func (s *ProjectSnippetsService) ListSnippets(pid interface{}, opt *ListSnippets
// GetSnippet gets a single project snippet
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#single-snippet
+// https://docs.gitlab.com/ce/api/project_snippets.html#single-snippet
func (s *ProjectSnippetsService) GetSnippet(pid interface{}, snippet int, options ...OptionFunc) (*Snippet, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -118,19 +113,19 @@ func (s *ProjectSnippetsService) GetSnippet(pid interface{}, snippet int, option
// CreateSnippetOptions represents the available CreateSnippet() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#create-new-snippet
+// https://docs.gitlab.com/ce/api/project_snippets.html#create-new-snippet
type CreateSnippetOptions struct {
- Title *string `url:"title,omitempty" json:"title,omitempty"`
- FileName *string `url:"file_name,omitempty" json:"file_name,omitempty"`
- Code *string `url:"code,omitempty" json:"code,omitempty"`
- VisibilityLevel *VisibilityLevelValue `url:"visibility_level,omitempty" json:"visibility_level,omitempty"`
+ Title *string `url:"title,omitempty" json:"title,omitempty"`
+ FileName *string `url:"file_name,omitempty" json:"file_name,omitempty"`
+ Code *string `url:"code,omitempty" json:"code,omitempty"`
+ Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"`
}
// CreateSnippet creates a new project snippet. The user must have permission
// to create new snippets.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#create-new-snippet
+// https://docs.gitlab.com/ce/api/project_snippets.html#create-new-snippet
func (s *ProjectSnippetsService) CreateSnippet(pid interface{}, opt *CreateSnippetOptions, options ...OptionFunc) (*Snippet, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -155,19 +150,19 @@ func (s *ProjectSnippetsService) CreateSnippet(pid interface{}, opt *CreateSnipp
// UpdateSnippetOptions represents the available UpdateSnippet() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#update-snippet
+// https://docs.gitlab.com/ce/api/project_snippets.html#update-snippet
type UpdateSnippetOptions struct {
- Title *string `url:"title,omitempty" json:"title,omitempty"`
- FileName *string `url:"file_name,omitempty" json:"file_name,omitempty"`
- Code *string `url:"code,omitempty" json:"code,omitempty"`
- VisibilityLevel *VisibilityLevelValue `url:"visibility_level,omitempty" json:"visibility_level,omitempty"`
+ Title *string `url:"title,omitempty" json:"title,omitempty"`
+ FileName *string `url:"file_name,omitempty" json:"file_name,omitempty"`
+ Code *string `url:"code,omitempty" json:"code,omitempty"`
+ Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"`
}
// UpdateSnippet updates an existing project snippet. The user must have
// permission to change an existing snippet.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#update-snippet
+// https://docs.gitlab.com/ce/api/project_snippets.html#update-snippet
func (s *ProjectSnippetsService) UpdateSnippet(pid interface{}, snippet int, opt *UpdateSnippetOptions, options ...OptionFunc) (*Snippet, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -194,7 +189,7 @@ func (s *ProjectSnippetsService) UpdateSnippet(pid interface{}, snippet int, opt
// code.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#delete-snippet
+// https://docs.gitlab.com/ce/api/project_snippets.html#delete-snippet
func (s *ProjectSnippetsService) DeleteSnippet(pid interface{}, snippet int, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -213,7 +208,7 @@ func (s *ProjectSnippetsService) DeleteSnippet(pid interface{}, snippet int, opt
// SnippetContent returns the raw project snippet as plain text.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#snippet-content
+// https://docs.gitlab.com/ce/api/project_snippets.html#snippet-content
func (s *ProjectSnippetsService) SnippetContent(pid interface{}, snippet int, options ...OptionFunc) ([]byte, *Response, error) {
project, err := parseID(pid)
if err != nil {
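
Since the snippet option structs switch from VisibilityLevel (*VisibilityLevelValue) to Visibility (*VisibilityValue) here, a minimal sketch of creating a snippet with the new field, assuming VisibilityValue is the string-backed type used elsewhere in this update ("private"/"internal"/"public") and that the service is exposed as client.ProjectSnippets; IDs and content are placeholders.

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client := gitlab.NewClient(nil, "YOUR_PRIVATE_TOKEN") // assumed constructor

	title := "hello snippet"
	fileName := "hello.go"
	code := "package main\n"
	visibility := gitlab.VisibilityValue("internal") // assumed string-backed visibility value

	snippet, _, err := client.ProjectSnippets.CreateSnippet(1234, &gitlab.CreateSnippetOptions{
		Title:      &title,
		FileName:   &fileName,
		Code:       &code,
		Visibility: &visibility,
	})
	if err != nil {
		log.Fatalf("creating snippet: %v", err)
	}
	log.Printf("created snippet %d: %s", snippet.ID, snippet.Title)
}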
diff --git a/vendor/github.com/xanzy/go-gitlab/projects.go b/vendor/github.com/xanzy/go-gitlab/projects.go
index 275cacd..317dae5 100644
--- a/vendor/github.com/xanzy/go-gitlab/projects.go
+++ b/vendor/github.com/xanzy/go-gitlab/projects.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -17,63 +17,67 @@
package gitlab
import (
+ "bytes"
"fmt"
+ "io"
+ "io/ioutil"
+ "mime/multipart"
"net/url"
+ "os"
"time"
)
// ProjectsService handles communication with the repositories related methods
// of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html
type ProjectsService struct {
client *Client
}
// Project represents a GitLab project.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html
type Project struct {
- ID int `json:"id"`
- Description string `json:"description"`
- DefaultBranch string `json:"default_branch"`
- Public bool `json:"public"`
- VisibilityLevel VisibilityLevelValue `json:"visibility_level"`
- SSHURLToRepo string `json:"ssh_url_to_repo"`
- HTTPURLToRepo string `json:"http_url_to_repo"`
- WebURL string `json:"web_url"`
- TagList []string `json:"tag_list"`
- Owner *User `json:"owner"`
- Name string `json:"name"`
- NameWithNamespace string `json:"name_with_namespace"`
- Path string `json:"path"`
- PathWithNamespace string `json:"path_with_namespace"`
- IssuesEnabled bool `json:"issues_enabled"`
- OpenIssuesCount int `json:"open_issues_count"`
- MergeRequestsEnabled bool `json:"merge_requests_enabled"`
- ApprovalsBeforeMerge int `json:"approvals_before_merge"`
- BuildsEnabled bool `json:"builds_enabled"`
- WikiEnabled bool `json:"wiki_enabled"`
- SnippetsEnabled bool `json:"snippets_enabled"`
- ContainerRegistryEnabled bool `json:"container_registry_enabled"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- LastActivityAt *time.Time `json:"last_activity_at,omitempty"`
- CreatorID int `json:"creator_id"`
- Namespace *ProjectNamespace `json:"namespace"`
- Permissions *Permissions `json:"permissions"`
- Archived bool `json:"archived"`
- AvatarURL string `json:"avatar_url"`
- SharedRunnersEnabled bool `json:"shared_runners_enabled"`
- ForksCount int `json:"forks_count"`
- StarCount int `json:"star_count"`
- RunnersToken string `json:"runners_token"`
- PublicBuilds bool `json:"public_builds"`
- OnlyAllowMergeIfBuildSucceeds bool `json:"only_allow_merge_if_build_succeeds"`
- OnlyAllowMergeIfAllDiscussionsAreResolved bool `json:"only_allow_merge_if_all_discussions_are_resolved"`
- LFSEnabled bool `json:"lfs_enabled"`
- RequestAccessEnabled bool `json:"request_access_enabled"`
+ ID int `json:"id"`
+ Description string `json:"description"`
+ DefaultBranch string `json:"default_branch"`
+ Public bool `json:"public"`
+ Visibility VisibilityValue `json:"visibility"`
+ SSHURLToRepo string `json:"ssh_url_to_repo"`
+ HTTPURLToRepo string `json:"http_url_to_repo"`
+ WebURL string `json:"web_url"`
+ TagList []string `json:"tag_list"`
+ Owner *User `json:"owner"`
+ Name string `json:"name"`
+ NameWithNamespace string `json:"name_with_namespace"`
+ Path string `json:"path"`
+ PathWithNamespace string `json:"path_with_namespace"`
+ IssuesEnabled bool `json:"issues_enabled"`
+ OpenIssuesCount int `json:"open_issues_count"`
+ MergeRequestsEnabled bool `json:"merge_requests_enabled"`
+ ApprovalsBeforeMerge int `json:"approvals_before_merge"`
+ JobsEnabled bool `json:"jobs_enabled"`
+ WikiEnabled bool `json:"wiki_enabled"`
+ SnippetsEnabled bool `json:"snippets_enabled"`
+ ContainerRegistryEnabled bool `json:"container_registry_enabled"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ LastActivityAt *time.Time `json:"last_activity_at,omitempty"`
+ CreatorID int `json:"creator_id"`
+ Namespace *ProjectNamespace `json:"namespace"`
+ Permissions *Permissions `json:"permissions"`
+ Archived bool `json:"archived"`
+ AvatarURL string `json:"avatar_url"`
+ SharedRunnersEnabled bool `json:"shared_runners_enabled"`
+ ForksCount int `json:"forks_count"`
+ StarCount int `json:"star_count"`
+ RunnersToken string `json:"runners_token"`
+ PublicJobs bool `json:"public_jobs"`
+ OnlyAllowMergeIfPipelineSucceeds bool `json:"only_allow_merge_if_pipeline_succeeds"`
+ OnlyAllowMergeIfAllDiscussionsAreResolved bool `json:"only_allow_merge_if_all_discussions_are_resolved"`
+ LFSEnabled bool `json:"lfs_enabled"`
+ RequestAccessEnabled bool `json:"request_access_enabled"`
+ ForkedFromProject *ForkParent `json:"forked_from_project"`
SharedWithGroups []struct {
GroupID int `json:"group_id"`
GroupName string `json:"group_name"`
@@ -84,20 +88,20 @@ type Project struct {
// Repository represents a repository.
type Repository struct {
- Name string `json:"name"`
- Description string `json:"description"`
- WebURL string `json:"web_url"`
- AvatarURL string `json:"avatar_url"`
- GitSSHURL string `json:"git_ssh_url"`
- GitHTTPURL string `json:"git_http_url"`
- Namespace string `json:"namespace"`
- VisibilityLevel int `json:"visibility_level"`
- PathWithNamespace string `json:"path_with_namespace"`
- DefaultBranch string `json:"default_branch"`
- Homepage string `json:"homepage"`
- URL string `json:"url"`
- SSHURL string `json:"ssh_url"`
- HTTPURL string `json:"http_url"`
+ Name string `json:"name"`
+ Description string `json:"description"`
+ WebURL string `json:"web_url"`
+ AvatarURL string `json:"avatar_url"`
+ GitSSHURL string `json:"git_ssh_url"`
+ GitHTTPURL string `json:"git_http_url"`
+ Namespace string `json:"namespace"`
+ Visibility VisibilityValue `json:"visibility"`
+ PathWithNamespace string `json:"path_with_namespace"`
+ DefaultBranch string `json:"default_branch"`
+ Homepage string `json:"homepage"`
+ URL string `json:"url"`
+ SSHURL string `json:"ssh_url"`
+ HTTPURL string `json:"http_url"`
}
// ProjectNamespace represents a project namespace.
@@ -113,10 +117,10 @@ type ProjectNamespace struct {
// StorageStatistics represents a statistics record for a group or project.
type StorageStatistics struct {
- StorageSize int64 `json:"storage_size"`
- RepositorySize int64 `json:"repository_size"`
- LfsObjectsSize int64 `json:"lfs_objects_size"`
- BuildArtifactsSize int64 `json:"build_artifacts_size"`
+ StorageSize int64 `json:"storage_size"`
+ RepositorySize int64 `json:"repository_size"`
+ LfsObjectsSize int64 `json:"lfs_objects_size"`
+ JobArtifactsSize int64 `json:"job_artifacts_size"`
}
// ProjectStatistics represents a statistics record for a project.
@@ -125,7 +129,7 @@ type ProjectStatistics struct {
CommitCount int `json:"commit_count"`
}
-// Permissions represents premissions.
+// Permissions represents permissions.
type Permissions struct {
ProjectAccess *ProjectAccess `json:"project_access"`
GroupAccess *GroupAccess `json:"group_access"`
@@ -143,29 +147,41 @@ type GroupAccess struct {
NotificationLevel NotificationLevelValue `json:"notification_level"`
}
+// ForkParent represents the parent project when this is a fork.
+type ForkParent struct {
+ HTTPURLToRepo string `json:"http_url_to_repo"`
+ ID int `json:"id"`
+ Name string `json:"name"`
+ NameWithNamespace string `json:"name_with_namespace"`
+ Path string `json:"path"`
+ PathWithNamespace string `json:"path_with_namespace"`
+ WebURL string `json:"web_url"`
+}
+
func (s Project) String() string {
return Stringify(s)
}
// ListProjectsOptions represents the available ListProjects() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-projects
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#list-projects
type ListProjectsOptions struct {
ListOptions
- Archived *bool `url:"archived,omitempty" json:"archived,omitempty"`
- OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
- Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
- Search *string `url:"search,omitempty" json:"search,omitempty"`
- Simple *bool `url:"simple,omitempty" json:"simple,omitempty"`
- Visibility *string `url:"visibility,omitempty" json:"visibility,omitempty"`
- Statistics *bool `url:"statistics,omitempty" json:"statistics,omitempty"`
+ Archived *bool `url:"archived,omitempty" json:"archived,omitempty"`
+ OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
+ Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
+ Search *string `url:"search,omitempty" json:"search,omitempty"`
+ Simple *bool `url:"simple,omitempty" json:"simple,omitempty"`
+ Owned *bool `url:"owned,omitempty" json:"owned,omitempty"`
+ Membership *bool `url:"membership,omitempty" json:"membership,omitempty"`
+ Starred *bool `url:"starred,omitempty" json:"starred,omitempty"`
+ Statistics *bool `url:"statistics,omitempty" json:"statistics,omitempty"`
+ Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"`
}
// ListProjects gets a list of projects accessible by the authenticated user.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-projects
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#list-projects
func (s *ProjectsService) ListProjects(opt *ListProjectsOptions, options ...OptionFunc) ([]*Project, *Response, error) {
req, err := s.client.NewRequest("GET", "projects", opt, options)
if err != nil {
@@ -181,70 +197,11 @@ func (s *ProjectsService) ListProjects(opt *ListProjectsOptions, options ...Opti
return p, resp, err
}
-// ListOwnedProjects gets a list of projects which are owned by the
-// authenticated user.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-owned-projects
-func (s *ProjectsService) ListOwnedProjects(opt *ListProjectsOptions, options ...OptionFunc) ([]*Project, *Response, error) {
- req, err := s.client.NewRequest("GET", "projects/owned", opt, options)
- if err != nil {
- return nil, nil, err
- }
-
- var p []*Project
- resp, err := s.client.Do(req, &p)
- if err != nil {
- return nil, resp, err
- }
-
- return p, resp, err
-}
-
-// ListStarredProjects gets a list of projects which are starred by the
-// authenticated user.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-starred-projects
-func (s *ProjectsService) ListStarredProjects(opt *ListProjectsOptions, options ...OptionFunc) ([]*Project, *Response, error) {
- req, err := s.client.NewRequest("GET", "projects/starred", opt, options)
- if err != nil {
- return nil, nil, err
- }
-
- var p []*Project
- resp, err := s.client.Do(req, &p)
- if err != nil {
- return nil, resp, err
- }
-
- return p, resp, err
-}
-
-// ListAllProjects gets a list of all GitLab projects (admin only).
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-all-projects
-func (s *ProjectsService) ListAllProjects(opt *ListProjectsOptions, options ...OptionFunc) ([]*Project, *Response, error) {
- req, err := s.client.NewRequest("GET", "projects/all", opt, options)
- if err != nil {
- return nil, nil, err
- }
-
- var p []*Project
- resp, err := s.client.Do(req, &p)
- if err != nil {
- return nil, resp, err
- }
-
- return p, resp, err
-}
-
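
With ListOwnedProjects, ListStarredProjects and ListAllProjects removed in favour of flags on ListProjectsOptions, a minimal sketch of the replacement call, reusing the assumed client setup from the sketches above; the flag combination shown is just an example.

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	client := gitlab.NewClient(nil, "YOUR_PRIVATE_TOKEN") // assumed constructor

	owned := true
	starred := true
	opt := &gitlab.ListProjectsOptions{
		Owned:   &owned,   // replaces the removed ListOwnedProjects
		Starred: &starred, // replaces the removed ListStarredProjects
	}

	projects, _, err := client.Projects.ListProjects(opt)
	if err != nil {
		log.Fatalf("listing projects: %v", err)
	}
	for _, p := range projects {
		log.Printf("%s (visibility: %v)", p.PathWithNamespace, p.Visibility)
	}
}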
// GetProject gets a specific project, identified by project ID or
// NAMESPACE/PROJECT_NAME, which is owned by the authenticated user.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#get-single-project
+// https://docs.gitlab.com/ce/api/projects.html#get-single-project
func (s *ProjectsService) GetProject(pid interface{}, options ...OptionFunc) (*Project, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -266,42 +223,10 @@ func (s *ProjectsService) GetProject(pid interface{}, options ...OptionFunc) (*P
return p, resp, err
}
-// SearchProjectsOptions represents the available SearchProjects() options.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#search-for-projects-by-name
-type SearchProjectsOptions struct {
- ListOptions
- OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
- Sort *string `url:"sort,omitempty" json:"sort,omitempty"`
-}
-
-// SearchProjects searches for projects by name which are accessible to the
-// authenticated user.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#search-for-projects-by-name
-func (s *ProjectsService) SearchProjects(query string, opt *SearchProjectsOptions, options ...OptionFunc) ([]*Project, *Response, error) {
- u := fmt.Sprintf("projects/search/%s", query)
-
- req, err := s.client.NewRequest("GET", u, opt, options)
- if err != nil {
- return nil, nil, err
- }
-
- var p []*Project
- resp, err := s.client.Do(req, &p)
- if err != nil {
- return nil, resp, err
- }
-
- return p, resp, err
-}
-
// ProjectEvent represents a GitLab project event.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#get-project-events
+// https://docs.gitlab.com/ce/api/projects.html#get-project-events
type ProjectEvent struct {
Title interface{} `json:"title"`
ProjectID int `json:"project_id"`
@@ -330,7 +255,7 @@ func (s ProjectEvent) String() string {
// GetProjectEventsOptions represents the available GetProjectEvents() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#get-project-events
+// https://docs.gitlab.com/ce/api/projects.html#get-project-events
type GetProjectEventsOptions struct {
ListOptions
}
@@ -339,7 +264,7 @@ type GetProjectEventsOptions struct {
// newest to oldest.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#get-project-events
+// https://docs.gitlab.com/ce/api/projects.html#get-project-events
func (s *ProjectsService) GetProjectEvents(pid interface{}, opt *GetProjectEventsOptions, options ...OptionFunc) ([]*ProjectEvent, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -363,33 +288,32 @@ func (s *ProjectsService) GetProjectEvents(pid interface{}, opt *GetProjectEvent
// CreateProjectOptions represents the available CreateProject() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#create-project
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#create-project
type CreateProjectOptions struct {
- Name *string `url:"name,omitempty" json:"name,omitempty"`
- Path *string `url:"path,omitempty" json:"path,omitempty"`
- NamespaceID *int `url:"namespace_id,omitempty" json:"namespace_id,omitempty"`
- Description *string `url:"description,omitempty" json:"description,omitempty"`
- IssuesEnabled *bool `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"`
- MergeRequestsEnabled *bool `url:"merge_requests_enabled,omitempty" json:"merge_requests_enabled,omitempty"`
- BuildsEnabled *bool `url:"builds_enabled,omitempty" json:"builds_enabled,omitempty"`
- WikiEnabled *bool `url:"wiki_enabled,omitempty" json:"wiki_enabled,omitempty"`
- SnippetsEnabled *bool `url:"snippets_enabled,omitempty" json:"snippets_enabled,omitempty"`
- ContainerRegistryEnabled *bool `url:"container_registry_enabled,omitempty" json:"container_registry_enabled,omitempty"`
- SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"`
- Public *bool `url:"public,omitempty" json:"public,omitempty"`
- VisibilityLevel *VisibilityLevelValue `url:"visibility_level,omitempty" json:"visibility_level,omitempty"`
- ImportURL *string `url:"import_url,omitempty" json:"import_url,omitempty"`
- PublicBuilds *bool `url:"public_builds,omitempty" json:"public_builds,omitempty"`
- OnlyAllowMergeIfBuildSucceeds *bool `url:"only_allow_merge_if_build_succeeds,omitempty" json:"only_allow_merge_if_build_succeeds,omitempty"`
- LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"`
- RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"`
+ Name *string `url:"name,omitempty" json:"name,omitempty"`
+ Path *string `url:"path,omitempty" json:"path,omitempty"`
+ DefaultBranch *string `url:"default_branch,omitempty" json:"default_branch,omitempty"`
+ NamespaceID *int `url:"namespace_id,omitempty" json:"namespace_id,omitempty"`
+ Description *string `url:"description,omitempty" json:"description,omitempty"`
+ IssuesEnabled *bool `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"`
+ MergeRequestsEnabled *bool `url:"merge_requests_enabled,omitempty" json:"merge_requests_enabled,omitempty"`
+ JobsEnabled *bool `url:"jobs_enabled,omitempty" json:"jobs_enabled,omitempty"`
+ WikiEnabled *bool `url:"wiki_enabled,omitempty" json:"wiki_enabled,omitempty"`
+ SnippetsEnabled *bool `url:"snippets_enabled,omitempty" json:"snippets_enabled,omitempty"`
+ ContainerRegistryEnabled *bool `url:"container_registry_enabled,omitempty" json:"container_registry_enabled,omitempty"`
+ SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"`
+ Visibility *VisibilityValue `url:"visibility,omitempty" json:"visibility,omitempty"`
+ ImportURL *string `url:"import_url,omitempty" json:"import_url,omitempty"`
+ PublicJobs *bool `url:"public_jobs,omitempty" json:"public_jobs,omitempty"`
+ OnlyAllowMergeIfPipelineSucceeds *bool `url:"only_allow_merge_if_pipeline_succeeds,omitempty" json:"only_allow_merge_if_pipeline_succeeds,omitempty"`
+ OnlyAllowMergeIfAllDiscussionsAreResolved *bool `url:"only_allow_merge_if_all_discussions_are_resolved,omitempty" json:"only_allow_merge_if_all_discussions_are_resolved,omitempty"`
+ LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"`
+ RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"`
}
// CreateProject creates a new project owned by the authenticated user.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#create-project
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#create-project
func (s *ProjectsService) CreateProject(opt *CreateProjectOptions, options ...OptionFunc) (*Project, *Response, error) {
req, err := s.client.NewRequest("POST", "projects", opt, options)
if err != nil {
@@ -409,25 +333,14 @@ func (s *ProjectsService) CreateProject(opt *CreateProjectOptions, options ...Op
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#create-project-for-user
-type CreateProjectForUserOptions struct {
- Name *string `url:"name,omitempty" json:"name,omitempty"`
- Description *string `url:"description,omitempty" json:"description,omitempty"`
- DefaultBranch *string `url:"default_branch,omitempty" json:"default_branch,omitempty"`
- IssuesEnabled *bool `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"`
- MergeRequestsEnabled *bool `url:"merge_requests_enabled,omitempty" json:"merge_requests_enabled,omitempty"`
- WikiEnabled *bool `url:"wiki_enabled,omitempty" json:"wiki_enabled,omitempty"`
- SnippetsEnabled *bool `url:"snippets_enabled,omitempty" json:"snippets_enabled,omitempty"`
- Public *bool `url:"public,omitempty" json:"public,omitempty"`
- VisibilityLevel *VisibilityLevelValue `url:"visibility_level,omitempty" json:"visibility_level,omitempty"`
- ImportURL *string `url:"import_url,omitempty" json:"import_url,omitempty"`
-}
+// https://docs.gitlab.com/ce/api/projects.html#create-project-for-user
+type CreateProjectForUserOptions CreateProjectOptions
// CreateProjectForUser creates a new project owned by the specified user.
// Available only for admins.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#create-project-for-user
+// https://docs.gitlab.com/ce/api/projects.html#create-project-for-user
func (s *ProjectsService) CreateProjectForUser(user int, opt *CreateProjectForUserOptions, options ...OptionFunc) (*Project, *Response, error) {
u := fmt.Sprintf("projects/user/%d", user)
@@ -447,35 +360,12 @@ func (s *ProjectsService) CreateProjectForUser(user int, opt *CreateProjectForUs
// EditProjectOptions represents the available EditProject() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#edit-project
-type EditProjectOptions struct {
- Name *string `url:"name,omitempty" json:"name,omitempty"`
- Path *string `url:"path,omitempty" json:"path,omitempty"`
- Description *string `url:"description,omitempty" json:"description,omitempty"`
- DefaultBranch *string `url:"default_branch,omitempty" json:"default_branch,omitempty"`
- IssuesEnabled *bool `url:"issues_enabled,omitempty" json:"issues_enabled,omitempty"`
- MergeRequestsEnabled *bool `url:"merge_requests_enabled,omitempty" json:"merge_requests_enabled,omitempty"`
- ApprovalsBeforeMerge *int `url:"approvals_before_merge,omitempty" json:"approvals_before_merge,omitempty"`
- BuildsEnabled *bool `url:"builds_enabled,omitempty" json:"builds_enabled,omitempty"`
- WikiEnabled *bool `url:"wiki_enabled,omitempty" json:"wiki_enabled,omitempty"`
- SnippetsEnabled *bool `url:"snippets_enabled,omitempty" json:"snippets_enabled,omitempty"`
- ContainerRegistryEnabled *bool `url:"container_registry_enabled,omitempty" json:"container_registry_enabled,omitempty"`
- SharedRunnersEnabled *bool `url:"shared_runners_enabled,omitempty" json:"shared_runners_enabled,omitempty"`
- Public *bool `url:"public,omitempty" json:"public,omitempty"`
- VisibilityLevel *VisibilityLevelValue `url:"visibility_level,omitempty" json:"visibility_level,omitempty"`
- ImportURL *bool `url:"import_url,omitempty" json:"import_url,omitempty"`
- PublicBuilds *bool `url:"public_builds,omitempty" json:"public_builds,omitempty"`
- OnlyAllowMergeIfBuildSucceeds *bool `url:"only_allow_merge_if_build_succeeds,omitempty" json:"only_allow_merge_if_build_succeeds,omitempty"`
- OnlyAllowMergeIfAllDiscussionsAreResolved *bool `url:"only_allow_merge_if_all_discussions_are_resolved,omitempty" json:"only_allow_merge_if_all_discussions_are_resolved,omitempty"`
- LFSEnabled *bool `url:"lfs_enabled,omitempty" json:"lfs_enabled,omitempty"`
- RequestAccessEnabled *bool `url:"request_access_enabled,omitempty" json:"request_access_enabled,omitempty"`
-}
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#edit-project
+type EditProjectOptions CreateProjectOptions
// EditProject updates an existing project.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#edit-project
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#edit-project
func (s *ProjectsService) EditProject(pid interface{}, opt *EditProjectOptions, options ...OptionFunc) (*Project, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -500,14 +390,13 @@ func (s *ProjectsService) EditProject(pid interface{}, opt *EditProjectOptions,
// ForkProject forks a project into the user namespace of the authenticated
// user.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#fork-project
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#fork-project
func (s *ProjectsService) ForkProject(pid interface{}, options ...OptionFunc) (*Project, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/fork/%s", url.QueryEscape(project))
+ u := fmt.Sprintf("projects/%s/fork", url.QueryEscape(project))
req, err := s.client.NewRequest("POST", u, nil, options)
if err != nil {
@@ -523,182 +412,147 @@ func (s *ProjectsService) ForkProject(pid interface{}, options ...OptionFunc) (*
return p, resp, err
}
-// DeleteProject removes a project including all associated resources
-// (issues, merge requests etc.)
+// StarProject stars a given project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#remove-project
-func (s *ProjectsService) DeleteProject(pid interface{}, options ...OptionFunc) (*Response, error) {
+// https://docs.gitlab.com/ce/api/projects.html#star-a-project
+func (s *ProjectsService) StarProject(pid interface{}, options ...OptionFunc) (*Project, *Response, error) {
project, err := parseID(pid)
if err != nil {
- return nil, err
+ return nil, nil, err
}
- u := fmt.Sprintf("projects/%s", url.QueryEscape(project))
+ u := fmt.Sprintf("projects/%s/star", url.QueryEscape(project))
- req, err := s.client.NewRequest("DELETE", u, nil, options)
+ req, err := s.client.NewRequest("POST", u, nil, options)
if err != nil {
- return nil, err
+ return nil, nil, err
}
- return s.client.Do(req, nil)
-}
-
-// ProjectMember represents a project member.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-project-team-members
-type ProjectMember struct {
- ID int `json:"id"`
- Username string `json:"username"`
- Email string `json:"email"`
- Name string `json:"name"`
- State string `json:"state"`
- CreatedAt *time.Time `json:"created_at"`
- AccessLevel AccessLevelValue `json:"access_level"`
-}
+ p := new(Project)
+ resp, err := s.client.Do(req, p)
+ if err != nil {
+ return nil, resp, err
+ }
-// ListProjectMembersOptions represents the available ListProjectMembers()
-// options.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-project-team-members
-type ListProjectMembersOptions struct {
- ListOptions
- Query *string `url:"query,omitempty" json:"query,omitempty"`
+ return p, resp, err
}
-// ListProjectMembers gets a list of a project's team members.
+// UnstarProject unstars a given project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-project-team-members
-func (s *ProjectsService) ListProjectMembers(pid interface{}, opt *ListProjectMembersOptions, options ...OptionFunc) ([]*ProjectMember, *Response, error) {
+// https://docs.gitlab.com/ce/api/projects.html#unstar-a-project
+func (s *ProjectsService) UnstarProject(pid interface{}, options ...OptionFunc) (*Project, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/members", url.QueryEscape(project))
+ u := fmt.Sprintf("projects/%s/unstar", url.QueryEscape(project))
- req, err := s.client.NewRequest("GET", u, opt, options)
+ req, err := s.client.NewRequest("POST", u, nil, options)
if err != nil {
return nil, nil, err
}
- var pm []*ProjectMember
- resp, err := s.client.Do(req, &pm)
+ p := new(Project)
+ resp, err := s.client.Do(req, p)
if err != nil {
return nil, resp, err
}
- return pm, resp, err
+ return p, resp, err
}
-// GetProjectMember gets a project team member.
+// ArchiveProject archives the project if the user is either an admin or
+// the owner of the project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#get-project-team-member
-func (s *ProjectsService) GetProjectMember(pid interface{}, user int, options ...OptionFunc) (*ProjectMember, *Response, error) {
+// https://docs.gitlab.com/ce/api/projects.html#archive-a-project
+func (s *ProjectsService) ArchiveProject(pid interface{}, options ...OptionFunc) (*Project, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/members/%d", url.QueryEscape(project), user)
+ u := fmt.Sprintf("projects/%s/archive", url.QueryEscape(project))
- req, err := s.client.NewRequest("GET", u, nil, options)
+ req, err := s.client.NewRequest("POST", u, nil, options)
if err != nil {
return nil, nil, err
}
- pm := new(ProjectMember)
- resp, err := s.client.Do(req, pm)
+ p := new(Project)
+ resp, err := s.client.Do(req, p)
if err != nil {
return nil, resp, err
}
- return pm, resp, err
-}
-
-// AddProjectMemberOptions represents the available AddProjectMember() options.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#add-project-team-member
-type AddProjectMemberOptions struct {
- UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"`
- AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+ return p, resp, err
}
-// AddProjectMember adds a user to a project team. This is an idempotent
-// method and can be called multiple times with the same parameters. Adding
-// team membership to a user that is already a member does not affect the
-// existing membership.
+// UnarchiveProject unarchives the project if the user is either an admin or
+// the owner of this project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#add-project-team-member
-func (s *ProjectsService) AddProjectMember(pid interface{}, opt *AddProjectMemberOptions, options ...OptionFunc) (*ProjectMember, *Response, error) {
+// https://docs.gitlab.com/ce/api/projects.html#unarchive-a-project
+func (s *ProjectsService) UnarchiveProject(pid interface{}, options ...OptionFunc) (*Project, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/members", url.QueryEscape(project))
+ u := fmt.Sprintf("projects/%s/unarchive", url.QueryEscape(project))
- req, err := s.client.NewRequest("POST", u, opt, options)
+ req, err := s.client.NewRequest("POST", u, nil, options)
if err != nil {
return nil, nil, err
}
- pm := new(ProjectMember)
- resp, err := s.client.Do(req, pm)
+ p := new(Project)
+ resp, err := s.client.Do(req, p)
if err != nil {
return nil, resp, err
}
- return pm, resp, err
-}
-
-// EditProjectMemberOptions represents the available EditProjectMember() options.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#edit-project-team-member
-type EditProjectMemberOptions struct {
- AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
+ return p, resp, err
}
-// EditProjectMember updates a project team member to a specified access level..
+// DeleteProject removes a project including all associated resources
+// (issues, merge requests etc.)
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#edit-project-team-member
-func (s *ProjectsService) EditProjectMember(pid interface{}, user int, opt *EditProjectMemberOptions, options ...OptionFunc) (*ProjectMember, *Response, error) {
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#remove-project
+func (s *ProjectsService) DeleteProject(pid interface{}, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
- return nil, nil, err
+ return nil, err
}
- u := fmt.Sprintf("projects/%s/members/%d", url.QueryEscape(project), user)
+ u := fmt.Sprintf("projects/%s", url.QueryEscape(project))
- req, err := s.client.NewRequest("PUT", u, opt, options)
+ req, err := s.client.NewRequest("DELETE", u, nil, options)
if err != nil {
- return nil, nil, err
+ return nil, err
}
- pm := new(ProjectMember)
- resp, err := s.client.Do(req, pm)
- if err != nil {
- return nil, resp, err
- }
+ return s.client.Do(req, nil)
+}
- return pm, resp, err
+// ShareWithGroupOptions represents the options to share a project with a group.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#share-project-with-group
+type ShareWithGroupOptions struct {
+ GroupID *int `url:"group_id" json:"group_id"`
+ GroupAccess *AccessLevelValue `url:"group_access" json:"group_access"`
+ ExpiresAt *string `url:"expires_at" json:"expires_at"`
}
-// DeleteProjectMember removes a user from a project team.
+// ShareProjectWithGroup allows sharing a project with a group.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#remove-project-team-member
-func (s *ProjectsService) DeleteProjectMember(pid interface{}, user int, options ...OptionFunc) (*Response, error) {
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#share-project-with-group
+func (s *ProjectsService) ShareProjectWithGroup(pid interface{}, opt *ShareWithGroupOptions, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, err
}
- u := fmt.Sprintf("projects/%s/members/%d", url.QueryEscape(project), user)
+ u := fmt.Sprintf("projects/%s/share", url.QueryEscape(project))
- req, err := s.client.NewRequest("DELETE", u, nil, options)
+ req, err := s.client.NewRequest("POST", u, opt, options)
if err != nil {
return nil, err
}
@@ -706,10 +560,24 @@ func (s *ProjectsService) DeleteProjectMember(pid interface{}, user int, options
return s.client.Do(req, nil)
}
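// Editor's note: illustrative usage sketch for the new ShareProjectWithGroup
// call and its ShareWithGroupOptions, not part of the vendored diff. The
// package name, client argument, group ID and the DeveloperPermissions
// access-level constant are assumptions for the example only.
package gitlabexamples

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func shareWithGroup(git *gitlab.Client) {
	// Grant developer access to group 42 on the project.
	level := gitlab.DeveloperPermissions
	opt := &gitlab.ShareWithGroupOptions{
		GroupID:     gitlab.Int(42),
		GroupAccess: &level,
	}
	if _, err := git.Projects.ShareProjectWithGroup("group/project", opt); err != nil {
		log.Fatal(err)
	}
}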
+// ProjectMember represents a project member.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/projects.html#list-project-team-members
+type ProjectMember struct {
+ ID int `json:"id"`
+ Username string `json:"username"`
+ Email string `json:"email"`
+ Name string `json:"name"`
+ State string `json:"state"`
+ CreatedAt *time.Time `json:"created_at"`
+ AccessLevel AccessLevelValue `json:"access_level"`
+}
+
// ProjectHook represents a project hook.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-project-hooks
+// https://docs.gitlab.com/ce/api/projects.html#list-project-hooks
type ProjectHook struct {
ID int `json:"id"`
URL string `json:"url"`
@@ -719,7 +587,7 @@ type ProjectHook struct {
MergeRequestsEvents bool `json:"merge_requests_events"`
TagPushEvents bool `json:"tag_push_events"`
NoteEvents bool `json:"note_events"`
- BuildEvents bool `json:"build_events"`
+ JobEvents bool `json:"job_events"`
PipelineEvents bool `json:"pipeline_events"`
WikiPageEvents bool `json:"wiki_page_events"`
EnableSSLVerification bool `json:"enable_ssl_verification"`
@@ -728,8 +596,7 @@ type ProjectHook struct {
// ListProjectHooksOptions represents the available ListProjectHooks() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-project-hooks
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#list-project-hooks
type ListProjectHooksOptions struct {
ListOptions
}
@@ -737,7 +604,7 @@ type ListProjectHooksOptions struct {
// ListProjectHooks gets a list of project hooks.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-project-hooks
+// https://docs.gitlab.com/ce/api/projects.html#list-project-hooks
func (s *ProjectsService) ListProjectHooks(pid interface{}, opt *ListProjectHooksOptions, options ...OptionFunc) ([]*ProjectHook, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -762,7 +629,7 @@ func (s *ProjectsService) ListProjectHooks(pid interface{}, opt *ListProjectHook
// GetProjectHook gets a specific hook for a project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#get-project-hook
+// https://docs.gitlab.com/ce/api/projects.html#get-project-hook
func (s *ProjectsService) GetProjectHook(pid interface{}, hook int, options ...OptionFunc) (*ProjectHook, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -787,7 +654,7 @@ func (s *ProjectsService) GetProjectHook(pid interface{}, hook int, options ...O
// AddProjectHookOptions represents the available AddProjectHook() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#add-project-hook
+// https://docs.gitlab.com/ce/api/projects.html#add-project-hook
type AddProjectHookOptions struct {
URL *string `url:"url,omitempty" json:"url,omitempty"`
PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
@@ -795,7 +662,7 @@ type AddProjectHookOptions struct {
MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"`
- BuildEvents *bool `url:"build_events,omitempty" json:"build_events,omitempty"`
+ JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"`
PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"`
@@ -805,7 +672,7 @@ type AddProjectHookOptions struct {
// AddProjectHook adds a hook to a specified project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#add-project-hook
+// https://docs.gitlab.com/ce/api/projects.html#add-project-hook
func (s *ProjectsService) AddProjectHook(pid interface{}, opt *AddProjectHookOptions, options ...OptionFunc) (*ProjectHook, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -830,7 +697,7 @@ func (s *ProjectsService) AddProjectHook(pid interface{}, opt *AddProjectHookOpt
// EditProjectHookOptions represents the available EditProjectHook() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#edit-project-hook
+// https://docs.gitlab.com/ce/api/projects.html#edit-project-hook
type EditProjectHookOptions struct {
URL *string `url:"url,omitempty" json:"url,omitempty"`
PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
@@ -838,7 +705,7 @@ type EditProjectHookOptions struct {
MergeRequestsEvents *bool `url:"merge_requests_events,omitempty" json:"merge_requests_events,omitempty"`
TagPushEvents *bool `url:"tag_push_events,omitempty" json:"tag_push_events,omitempty"`
NoteEvents *bool `url:"note_events,omitempty" json:"note_events,omitempty"`
- BuildEvents *bool `url:"build_events,omitempty" json:"build_events,omitempty"`
+ JobEvents *bool `url:"job_events,omitempty" json:"job_events,omitempty"`
PipelineEvents *bool `url:"pipeline_events,omitempty" json:"pipeline_events,omitempty"`
WikiPageEvents *bool `url:"wiki_page_events,omitempty" json:"wiki_page_events,omitempty"`
EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"`
@@ -848,7 +715,7 @@ type EditProjectHookOptions struct {
// EditProjectHook edits a hook for a specified project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#edit-project-hook
+// https://docs.gitlab.com/ce/api/projects.html#edit-project-hook
func (s *ProjectsService) EditProjectHook(pid interface{}, hook int, opt *EditProjectHookOptions, options ...OptionFunc) (*ProjectHook, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -874,7 +741,7 @@ func (s *ProjectsService) EditProjectHook(pid interface{}, hook int, opt *EditPr
// method and can be called multiple times. Either the hook is available or not.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#delete-project-hook
+// https://docs.gitlab.com/ce/api/projects.html#delete-project-hook
func (s *ProjectsService) DeleteProjectHook(pid interface{}, hook int, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -890,124 +757,10 @@ func (s *ProjectsService) DeleteProjectHook(pid interface{}, hook int, options .
return s.client.Do(req, nil)
}
-// BuildTrigger represents a project build trigger.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_triggers.md#build-triggers
-type BuildTrigger struct {
- CreatedAt *time.Time `json:"created_at"`
- DeletedAt *time.Time `json:"deleted_at"`
- LastUsed *time.Time `json:"last_used"`
- Token string `json:"token"`
- UpdatedAt *time.Time `json:"updated_at"`
-}
-
-// ListBuildTriggersOptions represents the available ListBuildTriggers() options.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_triggers.md#list-project-triggers
-type ListBuildTriggersOptions struct {
- ListOptions
-}
-
-// ListBuildTriggers gets a list of project triggers.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_triggers.md#list-project-triggers
-func (s *ProjectsService) ListBuildTriggers(pid interface{}, opt *ListBuildTriggersOptions, options ...OptionFunc) ([]*BuildTrigger, *Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, nil, err
- }
- u := fmt.Sprintf("projects/%s/triggers", url.QueryEscape(project))
-
- req, err := s.client.NewRequest("GET", u, opt, options)
- if err != nil {
- return nil, nil, err
- }
-
- var bt []*BuildTrigger
- resp, err := s.client.Do(req, &bt)
- if err != nil {
- return nil, resp, err
- }
-
- return bt, resp, err
-}
-
-// GetBuildTrigger gets a specific build trigger for a project.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_triggers.md#get-trigger-details
-func (s *ProjectsService) GetBuildTrigger(pid interface{}, token string, options ...OptionFunc) (*BuildTrigger, *Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, nil, err
- }
- u := fmt.Sprintf("projects/%s/triggers/%v", url.QueryEscape(project), token)
-
- req, err := s.client.NewRequest("GET", u, nil, options)
- if err != nil {
- return nil, nil, err
- }
-
- bt := new(BuildTrigger)
- resp, err := s.client.Do(req, bt)
- if err != nil {
- return nil, resp, err
- }
-
- return bt, resp, err
-}
-
-// AddBuildTrigger adds a build trigger to a specified project.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_triggers.md#create-a-project-trigger
-func (s *ProjectsService) AddBuildTrigger(pid interface{}, options ...OptionFunc) (*BuildTrigger, *Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, nil, err
- }
- u := fmt.Sprintf("projects/%s/triggers", url.QueryEscape(project))
-
- req, err := s.client.NewRequest("POST", u, nil, options)
- if err != nil {
- return nil, nil, err
- }
-
- bt := new(BuildTrigger)
- resp, err := s.client.Do(req, bt)
- if err != nil {
- return nil, resp, err
- }
-
- return bt, resp, err
-}
-
-// DeleteBuildTrigger removes a trigger from a project.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_triggers.md#remove-a-project-trigger
-func (s *ProjectsService) DeleteBuildTrigger(pid interface{}, token string, options ...OptionFunc) (*Response, error) {
- project, err := parseID(pid)
- if err != nil {
- return nil, err
- }
- u := fmt.Sprintf("projects/%s/triggers/%s", url.QueryEscape(project), token)
-
- req, err := s.client.NewRequest("DELETE", u, nil, options)
- if err != nil {
- return nil, err
- }
-
- return s.client.Do(req, nil)
-}
-
// ProjectForkRelation represents a project fork relationship.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#admin-fork-relation
+// https://docs.gitlab.com/ce/api/projects.html#admin-fork-relation
type ProjectForkRelation struct {
ID int `json:"id"`
ForkedToProjectID int `json:"forked_to_project_id"`
@@ -1020,7 +773,7 @@ type ProjectForkRelation struct {
// existing projects.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#create-a-forked-fromto-relation-between-existing-projects.
+// https://docs.gitlab.com/ce/api/projects.html#create-a-forked-fromto-relation-between-existing-projects.
func (s *ProjectsService) CreateProjectForkRelation(pid int, fork int, options ...OptionFunc) (*ProjectForkRelation, *Response, error) {
u := fmt.Sprintf("projects/%d/fork/%d", pid, fork)
@@ -1041,7 +794,7 @@ func (s *ProjectsService) CreateProjectForkRelation(pid int, fork int, options .
// DeleteProjectForkRelation deletes an existing forked from relationship.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#delete-an-existing-forked-from-relationship
+// https://docs.gitlab.com/ce/api/projects.html#delete-an-existing-forked-from-relationship
func (s *ProjectsService) DeleteProjectForkRelation(pid int, options ...OptionFunc) (*Response, error) {
u := fmt.Sprintf("projects/%d/fork", pid)
@@ -1053,54 +806,60 @@ func (s *ProjectsService) DeleteProjectForkRelation(pid int, options ...OptionFu
return s.client.Do(req, nil)
}
-// ArchiveProject archives the project if the user is either admin or the
-// project owner of this project.
+// ProjectFile represents an uploaded project file.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#archive-a-project
-func (s *ProjectsService) ArchiveProject(pid interface{}, options ...OptionFunc) (*Project, *Response, error) {
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#upload-a-file
+type ProjectFile struct {
+ Alt string `json:"alt"`
+ URL string `json:"url"`
+ Markdown string `json:"markdown"`
+}
+
+// UploadFile uploads a file from disk.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#upload-a-file
+func (s *ProjectsService) UploadFile(pid interface{}, file string, options ...OptionFunc) (*ProjectFile, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/archive", url.QueryEscape(project))
+ u := fmt.Sprintf("projects/%s/uploads", url.QueryEscape(project))
- req, err := s.client.NewRequest("POST", u, nil, options)
+ f, err := os.Open(file)
if err != nil {
return nil, nil, err
}
+ defer f.Close()
- p := new(Project)
- resp, err := s.client.Do(req, p)
+ b := &bytes.Buffer{}
+ w := multipart.NewWriter(b)
+
+ fw, err := w.CreateFormFile("file", file)
if err != nil {
- return nil, resp, err
+ return nil, nil, err
}
- return p, resp, err
-}
-
-// UnarchiveProject unarchives the project if the user is either admin or
-// the project owner of this project.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#unarchive-a-project
-func (s *ProjectsService) UnarchiveProject(pid interface{}, options ...OptionFunc) (*Project, *Response, error) {
- project, err := parseID(pid)
+ _, err = io.Copy(fw, f)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/unarchive", url.QueryEscape(project))
+ w.Close()
- req, err := s.client.NewRequest("POST", u, nil, options)
+ req, err := s.client.NewRequest("", u, nil, options)
if err != nil {
return nil, nil, err
}
- p := new(Project)
- resp, err := s.client.Do(req, p)
+ req.Body = ioutil.NopCloser(b)
+ req.ContentLength = int64(b.Len())
+ req.Header.Set("Content-Type", w.FormDataContentType())
+ req.Method = "POST"
+
+ uf := &ProjectFile{}
+ resp, err := s.client.Do(req, uf)
if err != nil {
return nil, resp, err
}
- return p, resp, err
+ return uf, resp, nil
}
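// Editor's note: illustrative usage sketch for the new UploadFile call, not
// part of the vendored diff. The package name, client argument, project path
// and local file path are assumptions for the example only.
package gitlabexamples

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func uploadAttachment(git *gitlab.Client) {
	// Upload a local file; the response carries a ready-to-use Markdown link.
	uf, _, err := git.Projects.UploadFile("group/project", "./diagram.png")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(uf.URL)
	fmt.Println(uf.Markdown)
}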
diff --git a/vendor/github.com/xanzy/go-gitlab/repositories.go b/vendor/github.com/xanzy/go-gitlab/repositories.go
index 02a0635..cb405ce 100644
--- a/vendor/github.com/xanzy/go-gitlab/repositories.go
+++ b/vendor/github.com/xanzy/go-gitlab/repositories.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -25,20 +25,19 @@ import (
// RepositoriesService handles communication with the repositories related
// methods of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/repositories.html
type RepositoriesService struct {
client *Client
}
// TreeNode represents a GitLab repository file or directory.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/repositories.html
type TreeNode struct {
ID string `json:"id"`
Name string `json:"name"`
Type string `json:"type"`
+ Path string `json:"path"`
Mode string `json:"mode"`
}
@@ -49,16 +48,17 @@ func (t TreeNode) String() string {
// ListTreeOptions represents the available ListTree() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#list-repository-tree
+// https://docs.gitlab.com/ce/api/repositories.html#list-repository-tree
type ListTreeOptions struct {
- Path *string `url:"path,omitempty" json:"path,omitempty"`
- RefName *string `url:"ref_name,omitempty" json:"ref_name,omitempty"`
+ Path *string `url:"path,omitempty" json:"path,omitempty"`
+ Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
+ Recursive *bool `url:"recursive,omitempty" json:"recursive,omitempty"`
}
// ListTree gets a list of repository files and directories in a project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#list-repository-tree
+// https://docs.gitlab.com/ce/api/repositories.html#list-repository-tree
func (s *RepositoriesService) ListTree(pid interface{}, opt *ListTreeOptions, options ...OptionFunc) ([]*TreeNode, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -80,26 +80,18 @@ func (s *RepositoriesService) ListTree(pid interface{}, opt *ListTreeOptions, op
return t, resp, err
}
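// Editor's note: illustrative usage sketch for the reworked ListTreeOptions
// (Ref and Recursive replace RefName), not part of the vendored diff. The
// package name, client argument, project path and ref are assumptions for
// the example only.
package gitlabexamples

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func walkTree(git *gitlab.Client) {
	opt := &gitlab.ListTreeOptions{
		Ref:       gitlab.String("master"),
		Recursive: gitlab.Bool(true),
	}
	nodes, _, err := git.Repositories.ListTree("group/project", opt)
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range nodes {
		// The new Path field gives the full path of each tree entry.
		fmt.Println(n.Type, n.Path)
	}
}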
-// RawFileContentOptions represents the available RawFileContent() options.
-//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#raw-file-content
-type RawFileContentOptions struct {
- FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"`
-}
-
// RawFileContent gets the raw file contents for a file by commit SHA and path
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#raw-file-content
-func (s *RepositoriesService) RawFileContent(pid interface{}, sha string, opt *RawFileContentOptions, options ...OptionFunc) ([]byte, *Response, error) {
+// https://docs.gitlab.com/ce/api/repositories.html#raw-file-content
+func (s *RepositoriesService) RawFileContent(pid interface{}, sha string, options ...OptionFunc) ([]byte, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/repository/blobs/%s", url.QueryEscape(project), sha)
- req, err := s.client.NewRequest("GET", u, opt, options)
+ req, err := s.client.NewRequest("GET", u, nil, options)
if err != nil {
return nil, nil, err
}
@@ -116,13 +108,13 @@ func (s *RepositoriesService) RawFileContent(pid interface{}, sha string, opt *R
// RawBlobContent gets the raw file contents for a blob by blob SHA.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#raw-blob-content
+// https://docs.gitlab.com/ce/api/repositories.html#raw-blob-content
func (s *RepositoriesService) RawBlobContent(pid interface{}, sha string, options ...OptionFunc) ([]byte, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/repository/raw_blobs/%s", url.QueryEscape(project), sha)
+ u := fmt.Sprintf("projects/%s/repository/blobs/%s/raw", url.QueryEscape(project), sha)
req, err := s.client.NewRequest("GET", u, nil, options)
if err != nil {
@@ -141,7 +133,7 @@ func (s *RepositoriesService) RawBlobContent(pid interface{}, sha string, option
// ArchiveOptions represents the available Archive() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#get-file-archive
+// https://docs.gitlab.com/ce/api/repositories.html#get-file-archive
type ArchiveOptions struct {
SHA *string `url:"sha,omitempty" json:"sha,omitempty"`
}
@@ -149,7 +141,7 @@ type ArchiveOptions struct {
// Archive gets an archive of the repository.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#get-file-archive
+// https://docs.gitlab.com/ce/api/repositories.html#get-file-archive
func (s *RepositoriesService) Archive(pid interface{}, opt *ArchiveOptions, options ...OptionFunc) ([]byte, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -174,7 +166,7 @@ func (s *RepositoriesService) Archive(pid interface{}, opt *ArchiveOptions, opti
// Compare represents the result of a comparison of branches, tags or commits.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#compare-branches-tags-or-commits
+// https://docs.gitlab.com/ce/api/repositories.html#compare-branches-tags-or-commits
type Compare struct {
Commit *Commit `json:"commit"`
Commits []*Commit `json:"commits"`
@@ -190,7 +182,7 @@ func (c Compare) String() string {
// CompareOptions represents the available Compare() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#compare-branches-tags-or-commits
+// https://docs.gitlab.com/ce/api/repositories.html#compare-branches-tags-or-commits
type CompareOptions struct {
From *string `url:"from,omitempty" json:"from,omitempty"`
To *string `url:"to,omitempty" json:"to,omitempty"`
@@ -199,7 +191,7 @@ type CompareOptions struct {
// Compare compares branches, tags or commits.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#compare-branches-tags-or-commits
+// https://docs.gitlab.com/ce/api/repositories.html#compare-branches-tags-or-commits
func (s *RepositoriesService) Compare(pid interface{}, opt *CompareOptions, options ...OptionFunc) (*Compare, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -223,8 +215,7 @@ func (s *RepositoriesService) Compare(pid interface{}, opt *CompareOptions, opti
// Contributor represents a GitLab contributor.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#contributer
+// GitLab API docs: https://docs.gitlab.com/ce/api/repositories.html#contributer
type Contributor struct {
Name string `json:"name,omitempty"`
Email string `json:"email,omitempty"`
@@ -239,8 +230,7 @@ func (c Contributor) String() string {
// Contributors gets the repository contributors list.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#contributer
+// GitLab API docs: https://docs.gitlab.com/ce/api/repositories.html#contributer
func (s *RepositoriesService) Contributors(pid interface{}, options ...OptionFunc) ([]*Contributor, *Response, error) {
project, err := parseID(pid)
if err != nil {
diff --git a/vendor/github.com/xanzy/go-gitlab/repository_files.go b/vendor/github.com/xanzy/go-gitlab/repository_files.go
index 5c0bc4b..873fe73 100644
--- a/vendor/github.com/xanzy/go-gitlab/repository_files.go
+++ b/vendor/github.com/xanzy/go-gitlab/repository_files.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -24,16 +24,14 @@ import (
// RepositoryFilesService handles communication with the repository files
// related methods of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/repository_files.html
type RepositoryFilesService struct {
client *Client
}
// File represents a GitLab repository file.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/repository_files.html
type File struct {
FileName string `json:"file_name"`
FilePath string `json:"file_path"`
@@ -52,23 +50,22 @@ func (r File) String() string {
// GetFileOptions represents the available GetFile() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#get-file-from-respository
+// https://docs.gitlab.com/ce/api/repository_files.html#get-file-from-repository
type GetFileOptions struct {
- FilePath *string `url:"file_path,omitempty" json:"file_path,omitempty"`
- Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
+ Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
}
// GetFile allows you to receive information about a file in a repository like
// name, size, content. Note that file content is Base64 encoded.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#get-file-from-respository
-func (s *RepositoryFilesService) GetFile(pid interface{}, opt *GetFileOptions, options ...OptionFunc) (*File, *Response, error) {
+// https://docs.gitlab.com/ce/api/repository_files.html#get-file-from-repository
+func (s *RepositoryFilesService) GetFile(pid interface{}, fileName string, opt *GetFileOptions, options ...OptionFunc) (*File, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/repository/files", url.QueryEscape(project))
+ u := fmt.Sprintf("projects/%s/repository/files/%s", url.QueryEscape(project), url.QueryEscape(fileName))
req, err := s.client.NewRequest("GET", u, opt, options)
if err != nil {
@@ -84,13 +81,45 @@ func (s *RepositoryFilesService) GetFile(pid interface{}, opt *GetFileOptions, o
return f, resp, err
}
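// Editor's note: illustrative usage sketch for the reworked GetFile call,
// which now takes the file path as an argument instead of a FilePath option;
// not part of the vendored diff. The package name, client argument, project
// path and ref are assumptions for the example only.
package gitlabexamples

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func readFileMetadata(git *gitlab.Client) {
	opt := &gitlab.GetFileOptions{Ref: gitlab.String("master")}
	f, _, err := git.RepositoryFiles.GetFile("group/project", "README.md", opt)
	if err != nil {
		log.Fatal(err)
	}
	// File content is returned Base64 encoded, per the doc comment above.
	fmt.Println(f.FileName, f.FilePath)
}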
-// FileInfo represents file details of a GitLab repository file.
+// GetRawFileOptions represents the available GetRawFile() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/repository_files.html#get-raw-file-from-repository
+type GetRawFileOptions struct {
+ Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
+}
+
+// GetRawFile allows you to receive the raw file from a repository.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md
+// https://docs.gitlab.com/ce/api/repository_files.html#get-raw-file-from-repository
+func (s *RepositoryFilesService) GetRawFile(pid interface{}, fileName string, opt *GetRawFileOptions, options ...OptionFunc) (*File, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/repository/files/%s/raw", url.QueryEscape(project), url.QueryEscape(fileName))
+
+ req, err := s.client.NewRequest("GET", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ f := new(File)
+ resp, err := s.client.Do(req, f)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return f, resp, err
+}
+
+// FileInfo represents file details of a GitLab repository file.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/repository_files.html
type FileInfo struct {
- FilePath string `json:"file_path"`
- BranchName string `json:"branch_name"`
+ FilePath string `json:"file_path"`
+ Branch string `json:"branch"`
}
func (r FileInfo) String() string {
@@ -100,10 +129,9 @@ func (r FileInfo) String() string {
// CreateFileOptions represents the available CreateFile() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#create-new-file-in-repository
+// https://docs.gitlab.com/ce/api/repository_files.html#create-new-file-in-repository
type CreateFileOptions struct {
- FilePath *string `url:"file_path,omitempty" json:"file_path,omitempty"`
- BranchName *string `url:"branch_name,omitempty" json:"branch_name,omitempty"`
+ Branch *string `url:"branch,omitempty" json:"branch,omitempty"`
Encoding *string `url:"encoding,omitempty" json:"encoding,omitempty"`
AuthorEmail *string `url:"author_email,omitempty" json:"author_email,omitempty"`
AuthorName *string `url:"author_name,omitempty" json:"author_name,omitempty"`
@@ -114,13 +142,13 @@ type CreateFileOptions struct {
// CreateFile creates a new file in a repository.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#create-new-file-in-repository
-func (s *RepositoryFilesService) CreateFile(pid interface{}, opt *CreateFileOptions, options ...OptionFunc) (*FileInfo, *Response, error) {
+// https://docs.gitlab.com/ce/api/repository_files.html#create-new-file-in-repository
+func (s *RepositoryFilesService) CreateFile(pid interface{}, fileName string, opt *CreateFileOptions, options ...OptionFunc) (*FileInfo, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/repository/files", url.QueryEscape(project))
+ u := fmt.Sprintf("projects/%s/repository/files/%s", url.QueryEscape(project), url.QueryEscape(fileName))
req, err := s.client.NewRequest("POST", u, opt, options)
if err != nil {
@@ -139,27 +167,27 @@ func (s *RepositoryFilesService) CreateFile(pid interface{}, opt *CreateFileOpti
// UpdateFileOptions represents the available UpdateFile() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#update-existing-file-in-repository
+// https://docs.gitlab.com/ce/api/repository_files.html#update-existing-file-in-repository
type UpdateFileOptions struct {
- FilePath *string `url:"file_path,omitempty" json:"file_path,omitempty"`
- BranchName *string `url:"branch_name,omitempty" json:"branch_name,omitempty"`
+ Branch *string `url:"branch,omitempty" json:"branch,omitempty"`
Encoding *string `url:"encoding,omitempty" json:"encoding,omitempty"`
AuthorEmail *string `url:"author_email,omitempty" json:"author_email,omitempty"`
AuthorName *string `url:"author_name,omitempty" json:"author_name,omitempty"`
Content *string `url:"content,omitempty" json:"content,omitempty"`
CommitMessage *string `url:"commit_message,omitempty" json:"commit_message,omitempty"`
+ LastCommitID *string `url:"last_commit_id,omitempty" json:"last_commit_id,omitempty"`
}
// UpdateFile updates an existing file in a repository
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#update-existing-file-in-repository
-func (s *RepositoryFilesService) UpdateFile(pid interface{}, opt *UpdateFileOptions, options ...OptionFunc) (*FileInfo, *Response, error) {
+// https://docs.gitlab.com/ce/api/repository_files.html#update-existing-file-in-repository
+func (s *RepositoryFilesService) UpdateFile(pid interface{}, fileName string, opt *UpdateFileOptions, options ...OptionFunc) (*FileInfo, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/repository/files", url.QueryEscape(project))
+ u := fmt.Sprintf("projects/%s/repository/files/%s", url.QueryEscape(project), url.QueryEscape(fileName))
req, err := s.client.NewRequest("PUT", u, opt, options)
if err != nil {
@@ -178,10 +206,9 @@ func (s *RepositoryFilesService) UpdateFile(pid interface{}, opt *UpdateFileOpti
// DeleteFileOptions represents the available DeleteFile() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#delete-existing-file-in-repository
+// https://docs.gitlab.com/ce/api/repository_files.html#delete-existing-file-in-repository
type DeleteFileOptions struct {
- FilePath *string `url:"file_path,omitempty" json:"file_path,omitempty"`
- BranchName *string `url:"branch_name,omitempty" json:"branch_name,omitempty"`
+ Branch *string `url:"branch,omitempty" json:"branch,omitempty"`
AuthorEmail *string `url:"author_email,omitempty" json:"author_email,omitempty"`
AuthorName *string `url:"author_name,omitempty" json:"author_name,omitempty"`
CommitMessage *string `url:"commit_message,omitempty" json:"commit_message,omitempty"`
@@ -190,24 +217,18 @@ type DeleteFileOptions struct {
// DeleteFile deletes an existing file in a repository
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#delete-existing-file-in-repository
-func (s *RepositoryFilesService) DeleteFile(pid interface{}, opt *DeleteFileOptions, options ...OptionFunc) (*FileInfo, *Response, error) {
+// https://docs.gitlab.com/ce/api/repository_files.html#delete-existing-file-in-repository
+func (s *RepositoryFilesService) DeleteFile(pid interface{}, fileName string, opt *DeleteFileOptions, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
- return nil, nil, err
+ return nil, err
}
- u := fmt.Sprintf("projects/%s/repository/files", url.QueryEscape(project))
+ u := fmt.Sprintf("projects/%s/repository/files/%s", url.QueryEscape(project), url.QueryEscape(fileName))
req, err := s.client.NewRequest("DELETE", u, opt, options)
if err != nil {
- return nil, nil, err
+ return nil, err
}
- f := new(FileInfo)
- resp, err := s.client.Do(req, f)
- if err != nil {
- return nil, resp, err
- }
-
- return f, resp, err
+ return s.client.Do(req, nil)
}
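// Editor's note: illustrative usage sketch for the reworked DeleteFile call,
// which now takes the file path as an argument and returns only a *Response;
// not part of the vendored diff. The package name, client argument, project
// path, branch and commit message are assumptions for the example only.
package gitlabexamples

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func removeFile(git *gitlab.Client) {
	opt := &gitlab.DeleteFileOptions{
		Branch:        gitlab.String("master"),
		CommitMessage: gitlab.String("Remove obsolete file"),
	}
	if _, err := git.RepositoryFiles.DeleteFile("group/project", "docs/old.md", opt); err != nil {
		log.Fatal(err)
	}
}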
diff --git a/vendor/github.com/xanzy/go-gitlab/services.go b/vendor/github.com/xanzy/go-gitlab/services.go
index 7dd094d..2613f3e 100644
--- a/vendor/github.com/xanzy/go-gitlab/services.go
+++ b/vendor/github.com/xanzy/go-gitlab/services.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -25,34 +25,34 @@ import (
// ServicesService handles communication with the services related methods of
// the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/services.html
type ServicesService struct {
client *Client
}
// Service represents a GitLab service.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/services.html
type Service struct {
- ID *int `json:"id"`
- Title *string `json:"title"`
+ ID int `json:"id"`
+ Title string `json:"title"`
CreatedAt *time.Time `json:"created_at"`
- UpdatedAt *time.Time `json:"created_at"`
- Active *bool `json:"active"`
- PushEvents *bool `json:"push_events"`
- IssuesEvents *bool `json:"issues_events"`
- MergeRequestsEvents *bool `json:"merge_requests_events"`
- TagPushEvents *bool `json:"tag_push_events"`
- NoteEvents *bool `json:"note_events"`
+ UpdatedAt *time.Time `json:"updated_at"`
+ Active bool `json:"active"`
+ PushEvents bool `json:"push_events"`
+ IssuesEvents bool `json:"issues_events"`
+ MergeRequestsEvents bool `json:"merge_requests_events"`
+ TagPushEvents bool `json:"tag_push_events"`
+ NoteEvents bool `json:"note_events"`
+ PipelineEvents bool `json:"pipeline_events"`
+ JobEvents bool `json:"job_events"`
}
// SetGitLabCIServiceOptions represents the available SetGitLabCIService()
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#edit-gitlab-ci-service
+// https://docs.gitlab.com/ce/api/services.html#edit-gitlab-ci-service
type SetGitLabCIServiceOptions struct {
Token *string `url:"token,omitempty" json:"token,omitempty"`
ProjectURL *string `url:"project_url,omitempty" json:"project_url,omitempty"`
@@ -61,7 +61,7 @@ type SetGitLabCIServiceOptions struct {
// SetGitLabCIService sets GitLab CI service for a project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#edit-gitlab-ci-service
+// https://docs.gitlab.com/ce/api/services.html#edit-gitlab-ci-service
func (s *ServicesService) SetGitLabCIService(pid interface{}, opt *SetGitLabCIServiceOptions, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -80,7 +80,7 @@ func (s *ServicesService) SetGitLabCIService(pid interface{}, opt *SetGitLabCISe
// DeleteGitLabCIService deletes GitLab CI service settings for a project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#delete-gitlab-ci-service
+// https://docs.gitlab.com/ce/api/services.html#delete-gitlab-ci-service
func (s *ServicesService) DeleteGitLabCIService(pid interface{}, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -100,7 +100,7 @@ func (s *ServicesService) DeleteGitLabCIService(pid interface{}, options ...Opti
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#edit-hipchat-service
+// https://docs.gitlab.com/ce/api/services.html#edit-hipchat-service
type SetHipChatServiceOptions struct {
Token *string `url:"token,omitempty" json:"token,omitempty" `
Room *string `url:"room,omitempty" json:"room,omitempty"`
@@ -109,7 +109,7 @@ type SetHipChatServiceOptions struct {
// SetHipChatService sets HipChat service for a project
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#edit-hipchat-service
+// https://docs.gitlab.com/ce/api/services.html#edit-hipchat-service
func (s *ServicesService) SetHipChatService(pid interface{}, opt *SetHipChatServiceOptions, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -128,7 +128,7 @@ func (s *ServicesService) SetHipChatService(pid interface{}, opt *SetHipChatServ
// DeleteHipChatService deletes HipChat service for project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#delete-hipchat-service
+// https://docs.gitlab.com/ce/api/services.html#delete-hipchat-service
func (s *ServicesService) DeleteHipChatService(pid interface{}, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -148,17 +148,17 @@ func (s *ServicesService) DeleteHipChatService(pid interface{}, options ...Optio
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#createedit-drone-ci-service
+// https://docs.gitlab.com/ce/api/services.html#createedit-drone-ci-service
type SetDroneCIServiceOptions struct {
Token *string `url:"token" json:"token" `
DroneURL *string `url:"drone_url" json:"drone_url"`
- EnableSSLVerification *string `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"`
+ EnableSSLVerification *bool `url:"enable_ssl_verification,omitempty" json:"enable_ssl_verification,omitempty"`
}
// SetDroneCIService sets Drone CI service for a project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#createedit-drone-ci-service
+// https://docs.gitlab.com/ce/api/services.html#createedit-drone-ci-service
func (s *ServicesService) SetDroneCIService(pid interface{}, opt *SetDroneCIServiceOptions, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -177,7 +177,7 @@ func (s *ServicesService) SetDroneCIService(pid interface{}, opt *SetDroneCIServ
// DeleteDroneCIService deletes Drone CI service settings for a project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#delete-drone-ci-service
+// https://docs.gitlab.com/ce/api/services.html#delete-drone-ci-service
func (s *ServicesService) DeleteDroneCIService(pid interface{}, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -193,23 +193,23 @@ func (s *ServicesService) DeleteDroneCIService(pid interface{}, options ...Optio
return s.client.Do(req, nil)
}
-// DroneCIServiceProperties represents Drone CI specific properties.
-type DroneCIServiceProperties struct {
- Token *string `url:"token" json:"token"`
- DroneURL *string `url:"drone_url" json:"drone_url"`
- EnableSSLVerification *string `url:"enable_ssl_verification" json:"enable_ssl_verification"`
-}
-
// DroneCIService represents Drone CI service settings.
type DroneCIService struct {
Service
Properties *DroneCIServiceProperties `json:"properties"`
}
+// DroneCIServiceProperties represents Drone CI specific properties.
+type DroneCIServiceProperties struct {
+ Token string `json:"token"`
+ DroneURL string `json:"drone_url"`
+ EnableSSLVerification bool `json:"enable_ssl_verification"`
+}
+
// GetDroneCIService gets Drone CI service settings for a project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#get-drone-ci-service-settings
+// https://docs.gitlab.com/ce/api/services.html#get-drone-ci-service-settings
func (s *ServicesService) GetDroneCIService(pid interface{}, options ...OptionFunc) (*DroneCIService, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -222,20 +222,56 @@ func (s *ServicesService) GetDroneCIService(pid interface{}, options ...OptionFu
return nil, nil, err
}
- opt := new(DroneCIService)
- resp, err := s.client.Do(req, opt)
+ svc := new(DroneCIService)
+ resp, err := s.client.Do(req, svc)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return svc, resp, err
+}
+
+// SlackService represents Slack service settings.
+type SlackService struct {
+ Service
+ Properties *SlackServiceProperties `json:"properties"`
+}
+
+// SlackServiceProperties represents Slack specific properties.
+type SlackServiceProperties struct {
+ NotifyOnlyBrokenPipelines bool `json:"notify_only_broken_pipelines"`
+}
+
+// GetSlackService gets Slack service settings for a project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/services.html#get-slack-service-settings
+func (s *ServicesService) GetSlackService(pid interface{}, options ...OptionFunc) (*SlackService, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/services/slack", url.QueryEscape(project))
+
+ req, err := s.client.NewRequest("GET", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ svc := new(SlackService)
+ resp, err := s.client.Do(req, svc)
if err != nil {
return nil, resp, err
}
- return opt, resp, err
+ return svc, resp, err
}
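// Editor's note: illustrative usage sketch for the new GetSlackService call,
// not part of the vendored diff. The package name, client argument and
// project path are assumptions for the example only.
package gitlabexamples

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func inspectSlackService(git *gitlab.Client) {
	svc, _, err := git.Services.GetSlackService("group/project")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(svc.Active, svc.Properties.NotifyOnlyBrokenPipelines)
}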
// SetSlackServiceOptions represents the available SetSlackService()
// options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#edit-slack-service
+// https://docs.gitlab.com/ce/api/services.html#edit-slack-service
type SetSlackServiceOptions struct {
WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty" `
Username *string `url:"username,omitempty" json:"username,omitempty" `
@@ -245,7 +281,7 @@ type SetSlackServiceOptions struct {
// SetSlackService sets Slack service for a project
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#edit-slack-service
+// https://docs.gitlab.com/ce/api/services.html#edit-slack-service
func (s *ServicesService) SetSlackService(pid interface{}, opt *SetSlackServiceOptions, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -264,7 +300,7 @@ func (s *ServicesService) SetSlackService(pid interface{}, opt *SetSlackServiceO
// DeleteSlackService deletes Slack service for project.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#delete-slack-service
+// https://docs.gitlab.com/ce/api/services.html#delete-slack-service
func (s *ServicesService) DeleteSlackService(pid interface{}, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
diff --git a/vendor/github.com/xanzy/go-gitlab/session.go b/vendor/github.com/xanzy/go-gitlab/session.go
index a5083c2..f89fdbe 100644
--- a/vendor/github.com/xanzy/go-gitlab/session.go
+++ b/vendor/github.com/xanzy/go-gitlab/session.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -21,16 +21,14 @@ import "time"
// SessionService handles communication with the session related methods of
// the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/session.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/session.html
type SessionService struct {
client *Client
}
// Session represents a GitLab session.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/session.md#session
+// GitLab API docs: https://docs.gitlab.com/ce/api/session.html#session
type Session struct {
ID int `json:"id"`
Username string `json:"username"`
@@ -54,8 +52,7 @@ type Session struct {
// GetSessionOptions represents the available Session() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/session.md#session
+// GitLab API docs: https://docs.gitlab.com/ce/api/session.html#session
type GetSessionOptions struct {
Login *string `url:"login,omitempty" json:"login,omitempty"`
Email *string `url:"email,omitempty" json:"email,omitempty"`
@@ -64,8 +61,7 @@ type GetSessionOptions struct {
// GetSession logs in to get private token.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/session.md#session
+// GitLab API docs: https://docs.gitlab.com/ce/api/session.html#session
func (s *SessionService) GetSession(opt *GetSessionOptions, options ...OptionFunc) (*Session, *Response, error) {
req, err := s.client.NewRequest("POST", "session", opt, options)
if err != nil {
diff --git a/vendor/github.com/xanzy/go-gitlab/settings.go b/vendor/github.com/xanzy/go-gitlab/settings.go
index a5c2d61..a0a293d 100644
--- a/vendor/github.com/xanzy/go-gitlab/settings.go
+++ b/vendor/github.com/xanzy/go-gitlab/settings.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -21,36 +21,34 @@ import "time"
// SettingsService handles communication with the application SettingsService
// related methods of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/settings.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/settings.html
type SettingsService struct {
client *Client
}
// Settings represents the GitLab application settings.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/settings.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/settings.html
type Settings struct {
- ID int `json:"id"`
- DefaultProjectsLimit int `json:"default_projects_limit"`
- SignupEnabled bool `json:"signup_enabled"`
- SigninEnabled bool `json:"signin_enabled"`
- GravatarEnabled bool `json:"gravatar_enabled"`
- SignInText string `json:"sign_in_text"`
- CreatedAt *time.Time `json:"created_at"`
- UpdatedAt *time.Time `json:"updated_at"`
- HomePageURL string `json:"home_page_url"`
- DefaultBranchProtection int `json:"default_branch_protection"`
- TwitterSharingEnabled bool `json:"twitter_sharing_enabled"`
- RestrictedVisibilityLevels []VisibilityLevelValue `json:"restricted_visibility_levels"`
- MaxAttachmentSize int `json:"max_attachment_size"`
- SessionExpireDelay int `json:"session_expire_delay"`
- DefaultProjectVisibility int `json:"default_project_visibility"`
- DefaultSnippetVisibility int `json:"default_snippet_visibility"`
- RestrictedSignupDomains []string `json:"restricted_signup_domains"`
- UserOauthApplications bool `json:"user_oauth_applications"`
- AfterSignOutPath string `json:"after_sign_out_path"`
+ ID int `json:"id"`
+ DefaultProjectsLimit int `json:"default_projects_limit"`
+ SignupEnabled bool `json:"signup_enabled"`
+ SigninEnabled bool `json:"signin_enabled"`
+ GravatarEnabled bool `json:"gravatar_enabled"`
+ SignInText string `json:"sign_in_text"`
+ CreatedAt *time.Time `json:"created_at"`
+ UpdatedAt *time.Time `json:"updated_at"`
+ HomePageURL string `json:"home_page_url"`
+ DefaultBranchProtection int `json:"default_branch_protection"`
+ TwitterSharingEnabled bool `json:"twitter_sharing_enabled"`
+ RestrictedVisibilityLevels []VisibilityValue `json:"restricted_visibility_levels"`
+ MaxAttachmentSize int `json:"max_attachment_size"`
+ SessionExpireDelay int `json:"session_expire_delay"`
+ DefaultProjectVisibility int `json:"default_project_visibility"`
+ DefaultSnippetVisibility int `json:"default_snippet_visibility"`
+ RestrictedSignupDomains []string `json:"restricted_signup_domains"`
+ UserOauthApplications bool `json:"user_oauth_applications"`
+ AfterSignOutPath string `json:"after_sign_out_path"`
}
func (s Settings) String() string {
@@ -60,7 +58,7 @@ func (s Settings) String() string {
// GetSettings gets the current application settings.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/settings.md#get-current-application.settings
+// https://docs.gitlab.com/ce/api/settings.html#get-current-application.settings
func (s *SettingsService) GetSettings(options ...OptionFunc) (*Settings, *Response, error) {
req, err := s.client.NewRequest("GET", "application/settings", nil, options)
if err != nil {
@@ -79,30 +77,30 @@ func (s *SettingsService) GetSettings(options ...OptionFunc) (*Settings, *Respon
// UpdateSettingsOptions represents the available UpdateSettings() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/settings.md#change-application.settings
+// https://docs.gitlab.com/ce/api/settings.html#change-application.settings
type UpdateSettingsOptions struct {
- DefaultProjectsLimit *int `url:"default_projects_limit,omitempty" json:"default_projects_limit,omitempty"`
- SignupEnabled *bool `url:"signup_enabled,omitempty" json:"signup_enabled,omitempty"`
- SigninEnabled *bool `url:"signin_enabled,omitempty" json:"signin_enabled,omitempty"`
- GravatarEnabled *bool `url:"gravatar_enabled,omitempty" json:"gravatar_enabled,omitempty"`
- SignInText *string `url:"sign_in_text,omitempty" json:"sign_in_text,omitempty"`
- HomePageURL *string `url:"home_page_url,omitempty" json:"home_page_url,omitempty"`
- DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"`
- TwitterSharingEnabled *bool `url:"twitter_sharing_enabled,omitempty" json:"twitter_sharing_enabled,omitempty"`
- RestrictedVisibilityLevels []VisibilityLevelValue `url:"restricted_visibility_levels,omitempty" json:"restricted_visibility_levels,omitempty"`
- MaxAttachmentSize *int `url:"max_attachment_size,omitempty" json:"max_attachment_size,omitempty"`
- SessionExpireDelay *int `url:"session_expire_delay,omitempty" json:"session_expire_delay,omitempty"`
- DefaultProjectVisibility *int `url:"default_project_visibility,omitempty" json:"default_project_visibility,omitempty"`
- DefaultSnippetVisibility *int `url:"default_snippet_visibility,omitempty" json:"default_snippet_visibility,omitempty"`
- RestrictedSignupDomains []string `url:"restricted_signup_domains,omitempty" json:"restricted_signup_domains,omitempty"`
- UserOauthApplications *bool `url:"user_oauth_applications,omitempty" json:"user_oauth_applications,omitempty"`
- AfterSignOutPath *string `url:"after_sign_out_path,omitempty" json:"after_sign_out_path,omitempty"`
+ DefaultProjectsLimit *int `url:"default_projects_limit,omitempty" json:"default_projects_limit,omitempty"`
+ SignupEnabled *bool `url:"signup_enabled,omitempty" json:"signup_enabled,omitempty"`
+ SigninEnabled *bool `url:"signin_enabled,omitempty" json:"signin_enabled,omitempty"`
+ GravatarEnabled *bool `url:"gravatar_enabled,omitempty" json:"gravatar_enabled,omitempty"`
+ SignInText *string `url:"sign_in_text,omitempty" json:"sign_in_text,omitempty"`
+ HomePageURL *string `url:"home_page_url,omitempty" json:"home_page_url,omitempty"`
+ DefaultBranchProtection *int `url:"default_branch_protection,omitempty" json:"default_branch_protection,omitempty"`
+ TwitterSharingEnabled *bool `url:"twitter_sharing_enabled,omitempty" json:"twitter_sharing_enabled,omitempty"`
+ RestrictedVisibilityLevels []VisibilityValue `url:"restricted_visibility_levels,omitempty" json:"restricted_visibility_levels,omitempty"`
+ MaxAttachmentSize *int `url:"max_attachment_size,omitempty" json:"max_attachment_size,omitempty"`
+ SessionExpireDelay *int `url:"session_expire_delay,omitempty" json:"session_expire_delay,omitempty"`
+ DefaultProjectVisibility *int `url:"default_project_visibility,omitempty" json:"default_project_visibility,omitempty"`
+ DefaultSnippetVisibility *int `url:"default_snippet_visibility,omitempty" json:"default_snippet_visibility,omitempty"`
+ RestrictedSignupDomains []string `url:"restricted_signup_domains,omitempty" json:"restricted_signup_domains,omitempty"`
+ UserOauthApplications *bool `url:"user_oauth_applications,omitempty" json:"user_oauth_applications,omitempty"`
+ AfterSignOutPath *string `url:"after_sign_out_path,omitempty" json:"after_sign_out_path,omitempty"`
}
// UpdateSettings updates the application settings.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/settings.md#change-application.settings
+// https://docs.gitlab.com/ce/api/settings.html#change-application-settings
func (s *SettingsService) UpdateSettings(opt *UpdateSettingsOptions, options ...OptionFunc) (*Settings, *Response, error) {
req, err := s.client.NewRequest("PUT", "application/settings", opt, options)
if err != nil {
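The settings hunk above keeps UpdateSettingsOptions pointer-based, so only fields that are explicitly set are serialized into the PUT body. Below is a minimal usage sketch, assuming a *gitlab.Client constructed elsewhere (e.g. with gitlab.NewClient), that the client exposes the service as a Settings field, and that the package's Bool/String pointer helpers are available; the values are placeholders.

package example

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

// updateSignupPolicy is a usage sketch, not part of the vendored code; thanks
// to the omitempty tags only the fields set below end up in the request body.
// git.Settings is assumed to be the SettingsService on the client.
func updateSignupPolicy(git *gitlab.Client) {
	opt := &gitlab.UpdateSettingsOptions{
		SignupEnabled:           gitlab.Bool(false),              // placeholder policy
		RestrictedSignupDomains: []string{"example.com"},         // placeholder domain
		AfterSignOutPath:        gitlab.String("/users/sign_in"), // placeholder path
	}

	settings, _, err := git.Settings.UpdateSettings(opt)
	if err != nil {
		log.Fatalf("updating settings: %v", err)
	}
	log.Printf("current settings: %s", settings)
}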
diff --git a/vendor/github.com/xanzy/go-gitlab/strings.go b/vendor/github.com/xanzy/go-gitlab/strings.go
index d0e7679..aeefb6b 100644
--- a/vendor/github.com/xanzy/go-gitlab/strings.go
+++ b/vendor/github.com/xanzy/go-gitlab/strings.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
diff --git a/vendor/github.com/xanzy/go-gitlab/system_hooks.go b/vendor/github.com/xanzy/go-gitlab/system_hooks.go
index d2d3f5b..77e8a8b 100644
--- a/vendor/github.com/xanzy/go-gitlab/system_hooks.go
+++ b/vendor/github.com/xanzy/go-gitlab/system_hooks.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -24,16 +24,14 @@ import (
// SystemHooksService handles communication with the system hooks related
// methods of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/system_hooks.html
type SystemHooksService struct {
client *Client
}
// Hook represents a GitLab system hook.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/system_hooks.html
type Hook struct {
ID int `json:"id"`
URL string `json:"url"`
@@ -47,7 +45,7 @@ func (h Hook) String() string {
// ListHooks gets a list of system hooks.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md#list-system-hooks
+// https://docs.gitlab.com/ce/api/system_hooks.html#list-system-hooks
func (s *SystemHooksService) ListHooks(options ...OptionFunc) ([]*Hook, *Response, error) {
req, err := s.client.NewRequest("GET", "hooks", nil, options)
if err != nil {
@@ -66,7 +64,7 @@ func (s *SystemHooksService) ListHooks(options ...OptionFunc) ([]*Hook, *Respons
// AddHookOptions represents the available AddHook() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md#add-new-system-hook-hook
+// https://docs.gitlab.com/ce/api/system_hooks.html#add-new-system-hook-hook
type AddHookOptions struct {
URL *string `url:"url,omitempty" json:"url,omitempty"`
}
@@ -74,7 +72,7 @@ type AddHookOptions struct {
// AddHook adds a new system hook hook.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md#add-new-system-hook-hook
+// https://docs.gitlab.com/ce/api/system_hooks.html#add-new-system-hook-hook
func (s *SystemHooksService) AddHook(opt *AddHookOptions, options ...OptionFunc) (*Hook, *Response, error) {
req, err := s.client.NewRequest("POST", "hooks", opt, options)
if err != nil {
@@ -92,8 +90,7 @@ func (s *SystemHooksService) AddHook(opt *AddHookOptions, options ...OptionFunc)
// HookEvent represents an event triggered by a GitLab system hook.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/system_hooks.html
type HookEvent struct {
EventName string `json:"event_name"`
Name string `json:"name"`
@@ -110,7 +107,7 @@ func (h HookEvent) String() string {
// TestHook tests a system hook.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md#test-system-hook
+// https://docs.gitlab.com/ce/api/system_hooks.html#test-system-hook
func (s *SystemHooksService) TestHook(hook int, options ...OptionFunc) (*HookEvent, *Response, error) {
u := fmt.Sprintf("hooks/%d", hook)
@@ -133,7 +130,7 @@ func (s *SystemHooksService) TestHook(hook int, options ...OptionFunc) (*HookEve
// is also returned as JSON.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md#delete-system-hook
+// https://docs.gitlab.com/ce/api/system_hooks.html#delete-system-hook
func (s *SystemHooksService) DeleteHook(hook int, options ...OptionFunc) (*Response, error) {
u := fmt.Sprintf("hooks/%d", hook)
diff --git a/vendor/github.com/xanzy/go-gitlab/tags.go b/vendor/github.com/xanzy/go-gitlab/tags.go
index 10bfdf5..f24822a 100644
--- a/vendor/github.com/xanzy/go-gitlab/tags.go
+++ b/vendor/github.com/xanzy/go-gitlab/tags.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -24,31 +24,33 @@ import (
// TagsService handles communication with the tags related methods
// of the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/tags.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/tags.html
type TagsService struct {
client *Client
}
// Tag represents a GitLab tag.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/tags.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/tags.html
type Tag struct {
Commit *Commit `json:"commit"`
- Name string `json:"name"`
- Message string `json:"message"`
+ Release struct {
+ TagName string `json:"tag_name"`
+ Description string `json:"description"`
+ } `json:"release"`
+ Name string `json:"name"`
+ Message string `json:"message"`
}
-func (r Tag) String() string {
- return Stringify(r)
+func (t Tag) String() string {
+ return Stringify(t)
}
// ListTags gets a list of tags from a project, sorted by name in reverse
// alphabetical order.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/tags.md#list-project-repository-tags
+// https://docs.gitlab.com/ce/api/tags.html#list-project-repository-tags
func (s *TagsService) ListTags(pid interface{}, options ...OptionFunc) ([]*Tag, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -74,7 +76,7 @@ func (s *TagsService) ListTags(pid interface{}, options ...OptionFunc) ([]*Tag,
// with the tag information if the tag exists. It returns 404 if the tag does not exist.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/tags.md#get-a-single-repository-tag
+// https://docs.gitlab.com/ce/api/tags.html#get-a-single-repository-tag
func (s *TagsService) GetTag(pid interface{}, tag string, options ...OptionFunc) (*Tag, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -99,17 +101,18 @@ func (s *TagsService) GetTag(pid interface{}, tag string, options ...OptionFunc)
// CreateTagOptions represents the available CreateTag() options.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/tags.md#create-a-new-tag
+// https://docs.gitlab.com/ce/api/tags.html#create-a-new-tag
type CreateTagOptions struct {
- TagName *string `url:"tag_name,omitempty" json:"tag_name,omitempty"`
- Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
- Message *string `url:"message,omitempty" json:"message,omitempty"`
+ TagName *string `url:"tag_name,omitempty" json:"tag_name,omitempty"`
+ Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
+ Message *string `url:"message,omitempty" json:"message,omitempty"`
+ ReleaseDescription *string `url:"release_description,omitempty" json:"release_description,omitempty"`
}
// CreateTag creates a new tag in the repository that points to the supplied ref.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/tags.md#create-a-new-tag
+// https://docs.gitlab.com/ce/api/tags.html#create-a-new-tag
func (s *TagsService) CreateTag(pid interface{}, opt *CreateTagOptions, options ...OptionFunc) (*Tag, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -134,7 +137,7 @@ func (s *TagsService) CreateTag(pid interface{}, opt *CreateTagOptions, options
// DeleteTag deletes a tag of a repository with given name.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/tags.md#delete-a-tag
+// https://docs.gitlab.com/ce/api/tags.html#delete-a-tag
func (s *TagsService) DeleteTag(pid interface{}, tag string, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
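The tags hunk adds a Release block to Tag and a ReleaseDescription field to CreateTagOptions. Here is a sketch of creating an annotated tag with release notes, assuming a Tags field on the client; the project ID, ref and texts are placeholders.

package example

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

// tagRelease is a usage sketch; git.Tags is assumed to be the TagsService on
// the client, and the project ID, ref and messages are placeholders.
func tagRelease(git *gitlab.Client) {
	tag, _, err := git.Tags.CreateTag(1234, &gitlab.CreateTagOptions{
		TagName:            gitlab.String("v1.0.0"),
		Ref:                gitlab.String("master"),
		Message:            gitlab.String("Release v1.0.0"),
		ReleaseDescription: gitlab.String("First stable release"),
	})
	if err != nil {
		log.Fatalf("creating tag: %v", err)
	}
	// The new Release block carries the release notes attached to the tag.
	log.Printf("tagged %s: %s", tag.Name, tag.Release.Description)
}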
diff --git a/vendor/github.com/xanzy/go-gitlab/time_stats.go b/vendor/github.com/xanzy/go-gitlab/time_stats.go
index 0e8fa55..5e3bff9 100644
--- a/vendor/github.com/xanzy/go-gitlab/time_stats.go
+++ b/vendor/github.com/xanzy/go-gitlab/time_stats.go
@@ -5,20 +5,17 @@ import (
"net/url"
)
-// TimeStatsService handles communication with the time tracking related
+// timeStatsService handles communication with the time tracking related
// methods of the GitLab API.
//
-// GitLab docs: https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/workflow/time_tracking.md
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md
-type TimeStatsService struct {
+// GitLab docs: https://docs.gitlab.com/ce/workflow/time_tracking.html
+type timeStatsService struct {
client *Client
}
// TimeStats represents the time estimates and time spent for an issue.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md
+// GitLab docs: https://docs.gitlab.com/ce/workflow/time_tracking.html
type TimeStats struct {
HumanTimeEstimate string `json:"human_time_estimate"`
HumanTotalTimeSpent string `json:"human_total_time_spent"`
@@ -33,22 +30,20 @@ func (t TimeStats) String() string {
// SetTimeEstimateOptions represents the available SetTimeEstimate()
// options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#set-a-time-estimate-for-an-issue
+// GitLab docs: https://docs.gitlab.com/ce/workflow/time_tracking.html
type SetTimeEstimateOptions struct {
Duration *string `url:"duration,omitempty" json:"duration,omitempty"`
}
-// SetTimeEstimate sets the time estimate for a single project issue.
+// setTimeEstimate sets the time estimate for a single project issue.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#set-a-time-estimate-for-an-issue
-func (s *TimeStatsService) SetTimeEstimate(pid interface{}, issue int, opt *SetTimeEstimateOptions, options ...OptionFunc) (*TimeStats, *Response, error) {
+// GitLab docs: https://docs.gitlab.com/ce/workflow/time_tracking.html
+func (s *timeStatsService) setTimeEstimate(pid interface{}, entity string, issue int, opt *SetTimeEstimateOptions, options ...OptionFunc) (*TimeStats, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/issues/%d/time_estimate", url.QueryEscape(project), issue)
+ u := fmt.Sprintf("projects/%s/%s/%d/time_estimate", url.QueryEscape(project), entity, issue)
req, err := s.client.NewRequest("POST", u, opt, options)
if err != nil {
@@ -64,16 +59,15 @@ func (s *TimeStatsService) SetTimeEstimate(pid interface{}, issue int, opt *SetT
return t, resp, err
}
-// ResetTimeEstimate resets the time estimate for a single project issue.
+// resetTimeEstimate resets the time estimate for a single project issue.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#reset-the-time-estimate-for-an-issue
-func (s *TimeStatsService) ResetTimeEstimate(pid interface{}, issue int, options ...OptionFunc) (*TimeStats, *Response, error) {
+// GitLab docs: https://docs.gitlab.com/ce/workflow/time_tracking.html
+func (s *timeStatsService) resetTimeEstimate(pid interface{}, entity string, issue int, options ...OptionFunc) (*TimeStats, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/issues/%d/reset_time_estimate", url.QueryEscape(project), issue)
+ u := fmt.Sprintf("projects/%s/%s/%d/reset_time_estimate", url.QueryEscape(project), entity, issue)
req, err := s.client.NewRequest("POST", u, nil, options)
if err != nil {
@@ -91,22 +85,20 @@ func (s *TimeStatsService) ResetTimeEstimate(pid interface{}, issue int, options
// AddSpentTimeOptions represents the available AddSpentTime() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#add-spent-time-for-an-issue
+// GitLab docs: https://docs.gitlab.com/ce/workflow/time_tracking.html
type AddSpentTimeOptions struct {
Duration *string `url:"duration,omitempty" json:"duration,omitempty"`
}
-// AddSpentTime adds spent time for a single project issue.
+// addSpentTime adds spent time for a single project issue.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#add-spent-time-for-an-issue
-func (s *TimeStatsService) AddSpentTime(pid interface{}, issue int, opt *AddSpentTimeOptions, options ...OptionFunc) (*TimeStats, *Response, error) {
+// GitLab docs: https://docs.gitlab.com/ce/workflow/time_tracking.html
+func (s *timeStatsService) addSpentTime(pid interface{}, entity string, issue int, opt *AddSpentTimeOptions, options ...OptionFunc) (*TimeStats, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/issues/%d/add_spent_time", url.QueryEscape(project), issue)
+ u := fmt.Sprintf("projects/%s/%s/%d/add_spent_time", url.QueryEscape(project), entity, issue)
req, err := s.client.NewRequest("POST", u, opt, options)
if err != nil {
@@ -122,16 +114,15 @@ func (s *TimeStatsService) AddSpentTime(pid interface{}, issue int, opt *AddSpen
return t, resp, err
}
-// ResetSpentTime resets the spent time for a single project issue.
+// resetSpentTime resets the spent time for a single project issue.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#reset-spent-time-for-an-issue
-func (s *TimeStatsService) ResetSpentTime(pid interface{}, issue int, options ...OptionFunc) (*TimeStats, *Response, error) {
+// GitLab docs: https://docs.gitlab.com/ce/workflow/time_tracking.html
+func (s *timeStatsService) resetSpentTime(pid interface{}, entity string, issue int, options ...OptionFunc) (*TimeStats, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/issues/%d/reset_spent_time", url.QueryEscape(project), issue)
+ u := fmt.Sprintf("projects/%s/%s/%d/reset_spent_time", url.QueryEscape(project), entity, issue)
req, err := s.client.NewRequest("POST", u, nil, options)
if err != nil {
@@ -147,16 +138,15 @@ func (s *TimeStatsService) ResetSpentTime(pid interface{}, issue int, options ..
return t, resp, err
}
-// GetTimeSpent gets the spent time for a single project issue.
+// getTimeSpent gets the spent time for a single project issue.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#get-time-tracking-stats
-func (s *TimeStatsService) GetTimeSpent(pid interface{}, issue int, options ...OptionFunc) (*TimeStats, *Response, error) {
+// GitLab docs: https://docs.gitlab.com/ce/workflow/time_tracking.html
+func (s *timeStatsService) getTimeSpent(pid interface{}, entity string, issue int, options ...OptionFunc) (*TimeStats, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
- u := fmt.Sprintf("projects/%s/issues/%d/time_stats", url.QueryEscape(project), issue)
+ u := fmt.Sprintf("projects/%s/%s/%d/time_stats", url.QueryEscape(project), entity, issue)
req, err := s.client.NewRequest("GET", u, nil, options)
if err != nil {
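The time-stats service is unexported here and every URL gains an entity segment, so the same helper can serve both issues and merge requests. The wrapper below is only a sketch of how an exported service might delegate to it; the issueTimeTracking type and its timeStats field are illustrative and not part of the package.

// issueTimeTracking is purely illustrative: it shows how an exported service
// could hold the unexported helper and pin the entity segment to "issues"
// (a merge-request service would pass "merge_requests" instead).
package gitlab

type issueTimeTracking struct {
	timeStats *timeStatsService
}

// SetTimeEstimate delegates to the shared helper, which builds
// projects/:id/issues/:iid/time_estimate from the entity argument.
func (i *issueTimeTracking) SetTimeEstimate(pid interface{}, issue int, opt *SetTimeEstimateOptions, options ...OptionFunc) (*TimeStats, *Response, error) {
	return i.timeStats.setTimeEstimate(pid, "issues", issue, opt, options...)
}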
diff --git a/vendor/github.com/xanzy/go-gitlab/todos.go b/vendor/github.com/xanzy/go-gitlab/todos.go
new file mode 100644
index 0000000..db58edd
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/todos.go
@@ -0,0 +1,175 @@
+package gitlab
+
+import "time"
+import "fmt"
+
+// TodosService handles communication with the todos related methods of
+// the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/todos.html
+type TodosService struct {
+ client *Client
+}
+
+// TodoAction represents the available actions that can be performed on a todo.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/todos.html
+type TodoAction string
+
+// The available todo actions.
+const (
+ TodoAssigned TodoAction = "assigned"
+ TodoMentioned TodoAction = "mentioned"
+ TodoBuildFailed TodoAction = "build_failed"
+ TodoMarked TodoAction = "marked"
+ TodoApprovalRequired TodoAction = "approval_required"
+ TodoDirectlyAddressed TodoAction = "directly_addressed"
+)
+
+// TodoTarget represents a todo target of type Issue or MergeRequest.
+type TodoTarget struct {
+ // TODO: replace both Assignee and Author structs with v4 User struct
+ Assignee struct {
+ Name string `json:"name"`
+ Username string `json:"username"`
+ ID int `json:"id"`
+ State string `json:"state"`
+ AvatarURL string `json:"avatar_url"`
+ WebURL string `json:"web_url"`
+ } `json:"assignee"`
+ Author struct {
+ Name string `json:"name"`
+ Username string `json:"username"`
+ ID int `json:"id"`
+ State string `json:"state"`
+ AvatarURL string `json:"avatar_url"`
+ WebURL string `json:"web_url"`
+ } `json:"author"`
+ CreatedAt *time.Time `json:"created_at"`
+ Description string `json:"description"`
+ Downvotes int `json:"downvotes"`
+ ID int `json:"id"`
+ IID int `json:"iid"`
+ Labels []string `json:"labels"`
+ Milestone Milestone `json:"milestone"`
+ ProjectID int `json:"project_id"`
+ State string `json:"state"`
+ Subscribed bool `json:"subscribed"`
+ Title string `json:"title"`
+ UpdatedAt *time.Time `json:"updated_at"`
+ Upvotes int `json:"upvotes"`
+ UserNotesCount int `json:"user_notes_count"`
+ WebURL string `json:"web_url"`
+
+ // Only available for type Issue
+ Confidential bool `json:"confidential"`
+ DueDate string `json:"due_date"`
+ Weight int `json:"weight"`
+
+ // Only available for type MergeRequest
+ ApprovalsBeforeMerge bool `json:"approvals_before_merge"`
+ ForceRemoveSourceBranch bool `json:"force_remove_source_branch"`
+ MergeCommitSha string `json:"merge_commit_sha"`
+ MergeWhenPipelineSucceeds bool `json:"merge_when_pipeline_succeeds"`
+ MergeStatus string `json:"merge_status"`
+ Sha string `json:"sha"`
+ ShouldRemoveSourceBranch bool `json:"should_remove_source_branch"`
+ SourceBranch string `json:"source_branch"`
+ SourceProjectID int `json:"source_project_id"`
+ Squash bool `json:"squash"`
+ TargetBranch string `json:"target_branch"`
+ TargetProjectID int `json:"target_project_id"`
+ WorkInProgress bool `json:"work_in_progress"`
+}
+
+// Todo represents a GitLab todo.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/todos.html
+type Todo struct {
+ ID int `json:"id"`
+ Project struct {
+ ID int `json:"id"`
+ HTTPURLToRepo string `json:"http_url_to_repo"`
+ WebURL string `json:"web_url"`
+ Name string `json:"name"`
+ NameWithNamespace string `json:"name_with_namespace"`
+ Path string `json:"path"`
+ PathWithNamespace string `json:"path_with_namespace"`
+ } `json:"project"`
+ Author struct {
+ ID int `json:"id"`
+ Name string `json:"name"`
+ Username string `json:"username"`
+ State string `json:"state"`
+ AvatarURL string `json:"avatar_url"`
+ WebURL string `json:"web_url"`
+ } `json:"author"`
+ ActionName TodoAction `json:"action_name"`
+ TargetType string `json:"target_type"`
+ Target TodoTarget `json:"target"`
+ TargetURL string `json:"target_url"`
+ Body string `json:"body"`
+ State string `json:"state"`
+ CreatedAt *time.Time `json:"created_at"`
+}
+
+func (t Todo) String() string {
+ return Stringify(t)
+}
+
+// ListTodosOptions represents the available ListTodos() options.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/todos.html#get-a-list-of-todos
+type ListTodosOptions struct {
+ Action *TodoAction `url:"action,omitempty" json:"action,omitempty"`
+ AuthorID *int `url:"author_id,omitempty" json:"author_id,omitempty"`
+ ProjectID *int `url:"project_id,omitempty" json:"project_id,omitempty"`
+ State *string `url:"state,omitempty" json:"state,omitempty"`
+ Type *string `url:"type,omitempty" json:"type,omitempty"`
+}
+
+// ListTodos lists all todos created by the authenticated user.
+// When no filter is applied, it returns all pending todos for the current user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/todos.html#get-a-list-of-todos
+func (s *TodosService) ListTodos(opt *ListTodosOptions, options ...OptionFunc) ([]*Todo, *Response, error) {
+ req, err := s.client.NewRequest("GET", "todos", opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var t []*Todo
+ resp, err := s.client.Do(req, &t)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return t, resp, err
+}
+
+// MarkTodoAsDone marks a single pending todo given by its ID for the current user as done.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/todos.html#mark-a-todo-as-done
+func (s *TodosService) MarkTodoAsDone(id int, options ...OptionFunc) (*Response, error) {
+ u := fmt.Sprintf("todos/%d/mark_as_done", id)
+
+ req, err := s.client.NewRequest("POST", u, nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// MarkAllTodosAsDone marks all pending todos for the current user as done.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/todos.html#mark-all-todos-as-done
+func (s *TodosService) MarkAllTodosAsDone(options ...OptionFunc) (*Response, error) {
+ req, err := s.client.NewRequest("POST", "todos/mark_as_done", nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
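todos.go introduces a new TodosService. The sketch below lists the pending todos generated by mentions and marks each one done; it assumes the client exposes the service as a Todos field.

package example

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

// clearMentionTodos is a usage sketch; git.Todos is assumed to be the new
// TodosService on the client.
func clearMentionTodos(git *gitlab.Client) {
	action := gitlab.TodoMentioned
	todos, _, err := git.Todos.ListTodos(&gitlab.ListTodosOptions{Action: &action})
	if err != nil {
		log.Fatalf("listing todos: %v", err)
	}

	for _, todo := range todos {
		if _, err := git.Todos.MarkTodoAsDone(todo.ID); err != nil {
			log.Printf("todo %d: %v", todo.ID, err)
		}
	}
}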
diff --git a/vendor/github.com/xanzy/go-gitlab/users.go b/vendor/github.com/xanzy/go-gitlab/users.go
index 350d593..b24f45d 100644
--- a/vendor/github.com/xanzy/go-gitlab/users.go
+++ b/vendor/github.com/xanzy/go-gitlab/users.go
@@ -1,5 +1,5 @@
//
-// Copyright 2015, Sander van Harmelen
+// Copyright 2017, Sander van Harmelen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -25,16 +25,14 @@ import (
// UsersService handles communication with the user related methods of
// the GitLab API.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html
type UsersService struct {
client *Client
}
// User represents a GitLab user.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html
type User struct {
ID int `json:"id"`
Username string `json:"username"`
@@ -70,8 +68,7 @@ type UserIdentity struct {
// ListUsersOptions represents the available ListUsers() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#list-users
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#list-users
type ListUsersOptions struct {
ListOptions
Active *bool `url:"active,omitempty" json:"active,omitempty"`
@@ -81,8 +78,7 @@ type ListUsersOptions struct {
// ListUsers gets a list of users.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#list-users
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#list-users
func (s *UsersService) ListUsers(opt *ListUsersOptions, options ...OptionFunc) ([]*User, *Response, error) {
req, err := s.client.NewRequest("GET", "users", opt, options)
if err != nil {
@@ -100,8 +96,7 @@ func (s *UsersService) ListUsers(opt *ListUsersOptions, options ...OptionFunc) (
// GetUser gets a single user.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#single-user
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#single-user
func (s *UsersService) GetUser(user int, options ...OptionFunc) (*User, *Response, error) {
u := fmt.Sprintf("users/%d", user)
@@ -121,30 +116,28 @@ func (s *UsersService) GetUser(user int, options ...OptionFunc) (*User, *Respons
// CreateUserOptions represents the available CreateUser() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#user-creation
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#user-creation
type CreateUserOptions struct {
- Email *string `url:"email,omitempty" json:"email,omitempty"`
- Password *string `url:"password,omitempty" json:"password,omitempty"`
- Username *string `url:"username,omitempty" json:"username,omitempty"`
- Name *string `url:"name,omitempty" json:"name,omitempty"`
- Skype *string `url:"skype,omitempty" json:"skype,omitempty"`
- Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"`
- Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"`
- WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"`
- ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"`
- ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"`
- Provider *string `url:"provider,omitempty" json:"provider,omitempty"`
- Bio *string `url:"bio,omitempty" json:"bio,omitempty"`
- Admin *bool `url:"admin,omitempty" json:"admin,omitempty"`
- CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"`
- Confirm *bool `url:"confirm,omitempty" json:"confirm,omitempty"`
+ Email *string `url:"email,omitempty" json:"email,omitempty"`
+ Password *string `url:"password,omitempty" json:"password,omitempty"`
+ Username *string `url:"username,omitempty" json:"username,omitempty"`
+ Name *string `url:"name,omitempty" json:"name,omitempty"`
+ Skype *string `url:"skype,omitempty" json:"skype,omitempty"`
+ Linkedin *string `url:"linkedin,omitempty" json:"linkedin,omitempty"`
+ Twitter *string `url:"twitter,omitempty" json:"twitter,omitempty"`
+ WebsiteURL *string `url:"website_url,omitempty" json:"website_url,omitempty"`
+ ProjectsLimit *int `url:"projects_limit,omitempty" json:"projects_limit,omitempty"`
+ ExternUID *string `url:"extern_uid,omitempty" json:"extern_uid,omitempty"`
+ Provider *string `url:"provider,omitempty" json:"provider,omitempty"`
+ Bio *string `url:"bio,omitempty" json:"bio,omitempty"`
+ Admin *bool `url:"admin,omitempty" json:"admin,omitempty"`
+ CanCreateGroup *bool `url:"can_create_group,omitempty" json:"can_create_group,omitempty"`
+ SkipConfirmation *bool `url:"skip_confirmation,omitempty" json:"skip_confirmation,omitempty"`
}
// CreateUser creates a new user. Note only administrators can create new users.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#user-creation
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#user-creation
func (s *UsersService) CreateUser(opt *CreateUserOptions, options ...OptionFunc) (*User, *Response, error) {
req, err := s.client.NewRequest("POST", "users", opt, options)
if err != nil {
@@ -162,8 +155,7 @@ func (s *UsersService) CreateUser(opt *CreateUserOptions, options ...OptionFunc)
// ModifyUserOptions represents the available ModifyUser() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#user-modification
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#user-modification
type ModifyUserOptions struct {
Email *string `url:"email,omitempty" json:"email,omitempty"`
Password *string `url:"password,omitempty" json:"password,omitempty"`
@@ -184,8 +176,7 @@ type ModifyUserOptions struct {
// ModifyUser modifies an existing user. Only administrators can change attributes
// of a user.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#user-modification
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#user-modification
func (s *UsersService) ModifyUser(user int, opt *ModifyUserOptions, options ...OptionFunc) (*User, *Response, error) {
u := fmt.Sprintf("users/%d", user)
@@ -209,8 +200,7 @@ func (s *UsersService) ModifyUser(user int, opt *ModifyUserOptions, options ...O
// actually deleted or not. In the former the user is returned and in the
// latter not.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#user-deletion
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#user-deletion
func (s *UsersService) DeleteUser(user int, options ...OptionFunc) (*Response, error) {
u := fmt.Sprintf("users/%d", user)
@@ -224,8 +214,7 @@ func (s *UsersService) DeleteUser(user int, options ...OptionFunc) (*Response, e
// CurrentUser gets currently authenticated user.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#current-user
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#current-user
func (s *UsersService) CurrentUser(options ...OptionFunc) (*User, *Response, error) {
req, err := s.client.NewRequest("GET", "user", nil, options)
if err != nil {
@@ -243,8 +232,7 @@ func (s *UsersService) CurrentUser(options ...OptionFunc) (*User, *Response, err
// SSHKey represents a SSH key.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#list-ssh-keys
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#list-ssh-keys
type SSHKey struct {
ID int `json:"id"`
Title string `json:"title"`
@@ -254,8 +242,7 @@ type SSHKey struct {
// ListSSHKeys gets a list of currently authenticated user's SSH keys.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#list-ssh-keys
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#list-ssh-keys
func (s *UsersService) ListSSHKeys(options ...OptionFunc) ([]*SSHKey, *Response, error) {
req, err := s.client.NewRequest("GET", "user/keys", nil, options)
if err != nil {
@@ -275,7 +262,7 @@ func (s *UsersService) ListSSHKeys(options ...OptionFunc) ([]*SSHKey, *Response,
// only for admin
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#list-ssh-keys-for-user
+// https://docs.gitlab.com/ce/api/users.html#list-ssh-keys-for-user
func (s *UsersService) ListSSHKeysForUser(user int, options ...OptionFunc) ([]*SSHKey, *Response, error) {
u := fmt.Sprintf("users/%d/keys", user)
@@ -295,10 +282,9 @@ func (s *UsersService) ListSSHKeysForUser(user int, options ...OptionFunc) ([]*S
// GetSSHKey gets a single key.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#single-ssh-key
-func (s *UsersService) GetSSHKey(kid int, options ...OptionFunc) (*SSHKey, *Response, error) {
- u := fmt.Sprintf("user/keys/%d", kid)
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#single-ssh-key
+func (s *UsersService) GetSSHKey(key int, options ...OptionFunc) (*SSHKey, *Response, error) {
+ u := fmt.Sprintf("user/keys/%d", key)
req, err := s.client.NewRequest("GET", u, nil, options)
if err != nil {
@@ -316,8 +302,7 @@ func (s *UsersService) GetSSHKey(kid int, options ...OptionFunc) (*SSHKey, *Resp
// AddSSHKeyOptions represents the available AddSSHKey() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#add-ssh-key
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#add-ssh-key
type AddSSHKeyOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Key *string `url:"key,omitempty" json:"key,omitempty"`
@@ -325,8 +310,7 @@ type AddSSHKeyOptions struct {
// AddSSHKey creates a new key owned by the currently authenticated user.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#add-ssh-key
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#add-ssh-key
func (s *UsersService) AddSSHKey(opt *AddSSHKeyOptions, options ...OptionFunc) (*SSHKey, *Response, error) {
req, err := s.client.NewRequest("POST", "user/keys", opt, options)
if err != nil {
@@ -345,8 +329,7 @@ func (s *UsersService) AddSSHKey(opt *AddSSHKeyOptions, options ...OptionFunc) (
// AddSSHKeyForUser creates new key owned by specified user. Available only for
// admin.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#add-ssh-key-for-user
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#add-ssh-key-for-user
func (s *UsersService) AddSSHKeyForUser(user int, opt *AddSSHKeyOptions, options ...OptionFunc) (*SSHKey, *Response, error) {
u := fmt.Sprintf("users/%d/keys", user)
@@ -369,9 +352,9 @@ func (s *UsersService) AddSSHKeyForUser(user int, opt *AddSSHKeyOptions, options
// available results in 200 OK.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#delete-ssh-key-for-current-owner
-func (s *UsersService) DeleteSSHKey(kid int, options ...OptionFunc) (*Response, error) {
- u := fmt.Sprintf("user/keys/%d", kid)
+// https://docs.gitlab.com/ce/api/users.html#delete-ssh-key-for-current-owner
+func (s *UsersService) DeleteSSHKey(key int, options ...OptionFunc) (*Response, error) {
+ u := fmt.Sprintf("user/keys/%d", key)
req, err := s.client.NewRequest("DELETE", u, nil, options)
if err != nil {
@@ -385,9 +368,9 @@ func (s *UsersService) DeleteSSHKey(kid int, options ...OptionFunc) (*Response,
// for admin.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#delete-ssh-key-for-given-user
-func (s *UsersService) DeleteSSHKeyForUser(user int, kid int, options ...OptionFunc) (*Response, error) {
- u := fmt.Sprintf("users/%d/keys/%d", user, kid)
+// https://docs.gitlab.com/ce/api/users.html#delete-ssh-key-for-given-user
+func (s *UsersService) DeleteSSHKeyForUser(user, key int, options ...OptionFunc) (*Response, error) {
+ u := fmt.Sprintf("users/%d/keys/%d", user, key)
req, err := s.client.NewRequest("DELETE", u, nil, options)
if err != nil {
@@ -399,12 +382,11 @@ func (s *UsersService) DeleteSSHKeyForUser(user int, kid int, options ...OptionF
// BlockUser blocks the specified user. Available only for admin.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#block-user
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#block-user
func (s *UsersService) BlockUser(user int, options ...OptionFunc) error {
u := fmt.Sprintf("users/%d/block", user)
- req, err := s.client.NewRequest("PUT", u, nil, options)
+ req, err := s.client.NewRequest("POST", u, nil, options)
if err != nil {
return err
}
@@ -428,12 +410,11 @@ func (s *UsersService) BlockUser(user int, options ...OptionFunc) error {
// UnblockUser unblocks the specified user. Available only for admin.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#unblock-user
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#unblock-user
func (s *UsersService) UnblockUser(user int, options ...OptionFunc) error {
u := fmt.Sprintf("users/%d/unblock", user)
- req, err := s.client.NewRequest("PUT", u, nil, options)
+ req, err := s.client.NewRequest("POST", u, nil, options)
if err != nil {
return err
}
@@ -465,8 +446,7 @@ type Email struct {
// ListEmails gets a list of currently authenticated user's Emails.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#list-emails
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#list-emails
func (s *UsersService) ListEmails(options ...OptionFunc) ([]*Email, *Response, error) {
req, err := s.client.NewRequest("GET", "user/emails", nil, options)
if err != nil {
@@ -486,9 +466,9 @@ func (s *UsersService) ListEmails(options ...OptionFunc) ([]*Email, *Response, e
// only for admin
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#list-emails-for-user
-func (s *UsersService) ListEmailsForUser(uid int, options ...OptionFunc) ([]*Email, *Response, error) {
- u := fmt.Sprintf("users/%d/emails", uid)
+// https://docs.gitlab.com/ce/api/users.html#list-emails-for-user
+func (s *UsersService) ListEmailsForUser(user int, options ...OptionFunc) ([]*Email, *Response, error) {
+ u := fmt.Sprintf("users/%d/emails", user)
req, err := s.client.NewRequest("GET", u, nil, options)
if err != nil {
@@ -506,10 +486,9 @@ func (s *UsersService) ListEmailsForUser(uid int, options ...OptionFunc) ([]*Ema
// GetEmail gets a single email.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#single-email
-func (s *UsersService) GetEmail(eid int, options ...OptionFunc) (*Email, *Response, error) {
- u := fmt.Sprintf("user/emails/%d", eid)
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#single-email
+func (s *UsersService) GetEmail(email int, options ...OptionFunc) (*Email, *Response, error) {
+ u := fmt.Sprintf("user/emails/%d", email)
req, err := s.client.NewRequest("GET", u, nil, options)
if err != nil {
@@ -527,16 +506,14 @@ func (s *UsersService) GetEmail(eid int, options ...OptionFunc) (*Email, *Respon
// AddEmailOptions represents the available AddEmail() options.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#add-email
+// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#add-email
type AddEmailOptions struct {
Email *string `url:"email,omitempty" json:"email,omitempty"`
}
// AddEmail creates a new email owned by the currently authenticated user.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#add-email
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#add-email
func (s *UsersService) AddEmail(opt *AddEmailOptions, options ...OptionFunc) (*Email, *Response, error) {
req, err := s.client.NewRequest("POST", "user/emails", opt, options)
if err != nil {
@@ -555,10 +532,9 @@ func (s *UsersService) AddEmail(opt *AddEmailOptions, options ...OptionFunc) (*E
// AddEmailForUser creates new email owned by specified user. Available only for
// admin.
//
-// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#add-email-for-user
-func (s *UsersService) AddEmailForUser(uid int, opt *AddEmailOptions, options ...OptionFunc) (*Email, *Response, error) {
- u := fmt.Sprintf("users/%d/emails", uid)
+// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#add-email-for-user
+func (s *UsersService) AddEmailForUser(user int, opt *AddEmailOptions, options ...OptionFunc) (*Email, *Response, error) {
+ u := fmt.Sprintf("users/%d/emails", user)
req, err := s.client.NewRequest("POST", u, opt, options)
if err != nil {
@@ -579,9 +555,9 @@ func (s *UsersService) AddEmailForUser(uid int, opt *AddEmailOptions, options ..
// available results in 200 OK.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#delete-email-for-current-owner
-func (s *UsersService) DeleteEmail(eid int, options ...OptionFunc) (*Response, error) {
- u := fmt.Sprintf("user/emails/%d", eid)
+// https://docs.gitlab.com/ce/api/users.html#delete-email-for-current-owner
+func (s *UsersService) DeleteEmail(email int, options ...OptionFunc) (*Response, error) {
+ u := fmt.Sprintf("user/emails/%d", email)
req, err := s.client.NewRequest("DELETE", u, nil, options)
if err != nil {
@@ -595,9 +571,122 @@ func (s *UsersService) DeleteEmail(eid int, options ...OptionFunc) (*Response, e
// for admin.
//
// GitLab API docs:
-// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#delete-email-for-given-user
-func (s *UsersService) DeleteEmailForUser(uid int, eid int, options ...OptionFunc) (*Response, error) {
- u := fmt.Sprintf("users/%d/emails/%d", uid, eid)
+// https://docs.gitlab.com/ce/api/users.html#delete-email-for-given-user
+func (s *UsersService) DeleteEmailForUser(user, email int, options ...OptionFunc) (*Response, error) {
+ u := fmt.Sprintf("users/%d/emails/%d", user, email)
+
+ req, err := s.client.NewRequest("DELETE", u, nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
+// ImpersonationToken represents an impersonation token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/users.html#get-all-impersonation-tokens-of-a-user
+type ImpersonationToken struct {
+ ID int `json:"id"`
+ Name string `json:"name"`
+ Active bool `json:"active"`
+ Token string `json:"token"`
+ Scopes []string `json:"scopes"`
+ Revoked bool `json:"revoked"`
+ CreatedAt *time.Time `json:"created_at"`
+ ExpiresAt *time.Time `json:"expires_at"`
+}
+
+// GetAllImpersonationTokensOptions represents the available
+// GetAllImpersonationTokens() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/users.html#get-all-impersonation-tokens-of-a-user
+type GetAllImpersonationTokensOptions struct {
+ State *string `url:"state,omitempty" json:"state,omitempty"`
+}
+
+// GetAllImpersonationTokens retrieves all impersonation tokens of a user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/users.html#get-all-impersonation-tokens-of-a-user
+func (s *UsersService) GetAllImpersonationTokens(user int, opt *GetAllImpersonationTokensOptions, options ...OptionFunc) ([]*ImpersonationToken, *Response, error) {
+ u := fmt.Sprintf("users/%d/impersonation_tokens", user)
+
+ req, err := s.client.NewRequest("GET", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var ts []*ImpersonationToken
+ resp, err := s.client.Do(req, &ts)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return ts, resp, err
+}
+
+// GetImpersonationToken retrieves an impersonation token of a user.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/users.html#get-an-impersonation-token-of-a-user
+func (s *UsersService) GetImpersonationToken(user, token int, options ...OptionFunc) (*ImpersonationToken, *Response, error) {
+ u := fmt.Sprintf("users/%d/impersonation_tokens/%d", user, token)
+
+ req, err := s.client.NewRequest("GET", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t := new(ImpersonationToken)
+ resp, err := s.client.Do(req, &t)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return t, resp, err
+}
+
+// CreateImpersonationTokenOptions represents the available
+// CreateImpersonationToken() options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/users.html#create-an-impersonation-token
+type CreateImpersonationTokenOptions struct {
+ Name *string `url:"name,omitempty" json:"name,omitempty"`
+ Scopes *[]string `url:"scopes,omitempty" json:"scopes,omitempty"`
+ ExpiresAt *time.Time `url:"expires_at,omitempty" json:"expires_at,omitempty"`
+}
+
+// CreateImpersonationToken creates an impersonation token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/users.html#create-an-impersonation-token
+func (s *UsersService) CreateImpersonationToken(user int, opt *CreateImpersonationTokenOptions, options ...OptionFunc) (*ImpersonationToken, *Response, error) {
+ u := fmt.Sprintf("users/%d/impersonation_tokens", user)
+
+ req, err := s.client.NewRequest("POST", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ t := new(ImpersonationToken)
+ resp, err := s.client.Do(req, &t)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return t, resp, err
+}
+
+// RevokeImpersonationToken revokes an impersonation token.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/users.html#revoke-an-impersonation-token
+func (s *UsersService) RevokeImpersonationToken(user, token int, options ...OptionFunc) (*Response, error) {
+ u := fmt.Sprintf("users/%d/impersonation_tokens/%d", user, token)
req, err := s.client.NewRequest("DELETE", u, nil, options)
if err != nil {
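Besides the doc-link rewrites, users.go swaps Confirm for SkipConfirmation, switches BlockUser/UnblockUser from PUT to POST, and adds impersonation-token support. A sketch of the token lifecycle follows, assuming a Users field on the client; the user ID and token name are placeholders.

package example

import (
	"log"
	"time"

	gitlab "github.com/xanzy/go-gitlab"
)

// issueAPIToken is a usage sketch; git.Users is assumed to be the UsersService
// on the client, and the user ID 42 is a placeholder. Requires admin rights.
func issueAPIToken(git *gitlab.Client) {
	expires := time.Now().Add(24 * time.Hour)
	token, _, err := git.Users.CreateImpersonationToken(42, &gitlab.CreateImpersonationTokenOptions{
		Name:      gitlab.String("ci-migration"),
		Scopes:    &[]string{"api"},
		ExpiresAt: &expires,
	})
	if err != nil {
		log.Fatalf("creating impersonation token: %v", err)
	}
	log.Printf("token %d created for user 42", token.ID)

	// Revoke the token again once it is no longer needed.
	if _, err := git.Users.RevokeImpersonationToken(42, token.ID); err != nil {
		log.Printf("revoking token: %v", err)
	}
}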
diff --git a/vendor/github.com/xanzy/go-gitlab/version.go b/vendor/github.com/xanzy/go-gitlab/version.go
new file mode 100644
index 0000000..f1a3a7f
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/version.go
@@ -0,0 +1,56 @@
+//
+// Copyright 2017, Andrea Funto'
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+
+package gitlab
+
+// VersionService handles communication with the GitLab server instance to
+// retrieve its version information via the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/version.html
+type VersionService struct {
+ client *Client
+}
+
+// Version represents a GitLab instance version.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/version.html
+type Version struct {
+ Version string `json:"version"`
+ Revision string `json:"revision"`
+}
+
+func (s Version) String() string {
+ return Stringify(s)
+}
+
+// GetVersion gets a GitLab server instance version; it is only available to
+// authenticated users.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/version.html
+func (s *VersionService) GetVersion() (*Version, *Response, error) {
+ req, err := s.client.NewRequest("GET", "version", nil, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ v := new(Version)
+ resp, err := s.client.Do(req, v)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return v, resp, err
+}
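version.go adds a VersionService for the authenticated /version endpoint. A short usage sketch, assuming the client exposes it as a Version field:

package example

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

// printServerVersion is a usage sketch; git.Version is assumed to be the new
// VersionService on the client.
func printServerVersion(git *gitlab.Client) {
	v, _, err := git.Version.GetVersion()
	if err != nil {
		log.Fatalf("fetching version: %v", err)
	}
	log.Printf("GitLab %s (revision %s)", v.Version, v.Revision)
}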
diff --git a/vendor/github.com/xanzy/go-gitlab/wikis.go b/vendor/github.com/xanzy/go-gitlab/wikis.go
new file mode 100644
index 0000000..7288985
--- /dev/null
+++ b/vendor/github.com/xanzy/go-gitlab/wikis.go
@@ -0,0 +1,204 @@
+// Copyright 2017, Stany MARCEL
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package gitlab
+
+import (
+ "fmt"
+ "net/url"
+)
+
+// WikisService handles communication with the wikis related methods of
+// the GitLab API.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/wikis.html
+type WikisService struct {
+ client *Client
+}
+
+// WikiFormat represents the available wiki formats.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/wikis.html
+type WikiFormat string
+
+// The available wiki formats.
+const (
+ WikiFormatMarkdown WikiFormat = "markdown"
+ WikiFormatRDoc     WikiFormat = "rdoc"
+ WikiFormatASCIIDoc WikiFormat = "asciidoc"
+)
+
+// Wiki represents a GitLab wiki.
+//
+// GitLab API docs: https://docs.gitlab.com/ce/api/wikis.html
+type Wiki struct {
+ Content string `json:"content"`
+ Format WikiFormat `json:"format"`
+ Slug string `json:"slug"`
+ Title string `json:"title"`
+}
+
+func (w Wiki) String() string {
+ return Stringify(w)
+}
+
+// ListWikisOptions represents the available ListWikis options.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/wikis.html#list-wiki-pages
+type ListWikisOptions struct {
+ WithContent *bool `url:"with_content,omitempty" json:"with_content,omitempty"`
+}
+
+// ListWikis lists all pages of the wiki of the given project id.
+// When with_content is set, it also returns the content of the pages.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/wikis.html#list-wiki-pages
+func (s *WikisService) ListWikis(pid interface{}, opt *ListWikisOptions, options ...OptionFunc) ([]*Wiki, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/wikis", url.QueryEscape(project))
+
+ req, err := s.client.NewRequest("GET", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var w []*Wiki
+ resp, err := s.client.Do(req, &w)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return w, resp, err
+}
+
+// GetWikiPage gets a wiki page for a given project.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/wikis.html#get-a-wiki-page
+func (s *WikisService) GetWikiPage(pid interface{}, slug string, options ...OptionFunc) (*Wiki, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/wikis/%s", url.QueryEscape(project), url.QueryEscape(slug))
+
+ req, err := s.client.NewRequest("GET", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var w *Wiki
+ resp, err := s.client.Do(req, &w)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return w, resp, err
+}
+
+// CreateWikiPageOptions represents options to CreateWikiPage.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/wikis.html#create-a-new-wiki-page
+type CreateWikiPageOptions struct {
+ Content *string `url:"content" json:"content"`
+ Title *string `url:"title" json:"title"`
+ Format *string `url:"format,omitempty" json:"format,omitempty"`
+}
+
+// CreateWikiPage creates a new wiki page for the given repository with
+// the given title, slug, and content.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/wikis.html#create-a-new-wiki-page
+func (s *WikisService) CreateWikiPage(pid interface{}, opt *CreateWikiPageOptions, options ...OptionFunc) (*Wiki, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/wikis", url.QueryEscape(project))
+
+ req, err := s.client.NewRequest("POST", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ w := new(Wiki)
+ resp, err := s.client.Do(req, w)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return w, resp, err
+}
+
+// EditWikiPageOptions represents options to EditWikiPage.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/wikis.html#edit-an-existing-wiki-page
+type EditWikiPageOptions struct {
+ Content *string `url:"content" json:"content"`
+ Title *string `url:"title" json:"title"`
+ Format *string `url:"format,omitempty" json:"format,omitempty"`
+}
+
+// EditWikiPage updates an existing wiki page. At least one parameter is
+// required to update the wiki page.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/wikis.html#edit-an-existing-wiki-page
+func (s *WikisService) EditWikiPage(pid interface{}, slug string, opt *EditWikiPageOptions, options ...OptionFunc) (*Wiki, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/wikis/%s", url.QueryEscape(project), url.QueryEscape(slug))
+
+ req, err := s.client.NewRequest("PUT", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ w := new(Wiki)
+ resp, err := s.client.Do(req, w)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return w, resp, err
+}
+
+// DeleteWikiPage deletes a wiki page with a given slug.
+//
+// GitLab API docs:
+// https://docs.gitlab.com/ce/api/wikis.html#delete-a-wiki-page
+func (s *WikisService) DeleteWikiPage(pid interface{}, slug string, options ...OptionFunc) (*Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf("projects/%s/wikis/%s", url.QueryEscape(project), url.QueryEscape(slug))
+
+ req, err := s.client.NewRequest("DELETE", u, nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
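wikis.go adds full wiki-page CRUD. The sketch below creates a Markdown page and then lists all pages with their content, assuming a Wikis field on the client; the project ID, title and content are placeholders.

package example

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

// seedWiki is a usage sketch; git.Wikis is assumed to be the new WikisService
// on the client, and the project ID, title and content are placeholders.
func seedWiki(git *gitlab.Client) {
	page, _, err := git.Wikis.CreateWikiPage(1234, &gitlab.CreateWikiPageOptions{
		Title:   gitlab.String("Home"),
		Content: gitlab.String("Project documentation lives here."),
		Format:  gitlab.String("markdown"),
	})
	if err != nil {
		log.Fatalf("creating wiki page: %v", err)
	}
	log.Printf("created page %q (slug %s)", page.Title, page.Slug)

	pages, _, err := git.Wikis.ListWikis(1234, &gitlab.ListWikisOptions{WithContent: gitlab.Bool(true)})
	if err != nil {
		log.Fatalf("listing wiki pages: %v", err)
	}
	for _, p := range pages {
		log.Printf("%s: %d bytes of content", p.Slug, len(p.Content))
	}
}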