author    Niall Sheridan <nsheridan@gmail.com>  2017-04-10 21:18:42 +0100
committer Niall Sheridan <nsheridan@gmail.com>  2017-04-10 21:38:33 +0100
commit    30802e07b2d84fbc213b490d3402707dffe60096 (patch)
tree      934aecb8f3582325dfd1aa6652193adac87d00db /vendor/github.com
parent    da7638dc112c4c106e8929601b642d2ca4596cba (diff)
update dependencies
Diffstat (limited to 'vendor/github.com')
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/config.go21
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/context.go71
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go41
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go9
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go21
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go159
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go1
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go110
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go11
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go66
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/request.go128
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go14
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go14
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go154
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go31
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go94
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go293
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/doc.go93
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go20
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/session/session.go178
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go7
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go27
-rw-r--r--vendor/github.com/aws/aws-sdk-go/aws/version.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go7
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go46
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go29
-rw-r--r--vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go134
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/api.go1594
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/errors.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/service.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go64
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go207
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sts/api.go136
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sts/errors.go2
-rw-r--r--vendor/github.com/aws/aws-sdk-go/service/sts/service.go2
-rw-r--r--vendor/github.com/fsnotify/fsnotify/inotify.go57
-rw-r--r--vendor/github.com/go-ini/ini/ini.go62
-rw-r--r--vendor/github.com/go-ini/ini/key.go148
-rw-r--r--vendor/github.com/go-ini/ini/parser.go26
-rw-r--r--vendor/github.com/go-ini/ini/section.go28
-rw-r--r--vendor/github.com/go-ini/ini/struct.go45
-rw-r--r--vendor/github.com/go-sql-driver/mysql/AUTHORS2
-rw-r--r--vendor/github.com/go-sql-driver/mysql/README.md18
-rw-r--r--vendor/github.com/go-sql-driver/mysql/connection.go34
-rw-r--r--vendor/github.com/go-sql-driver/mysql/packets.go60
-rw-r--r--vendor/github.com/go-sql-driver/mysql/rows.go122
-rw-r--r--vendor/github.com/go-sql-driver/mysql/statement.go56
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile39
-rw-r--r--vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go2065
-rw-r--r--vendor/github.com/google/go-github/github/activity.go6
-rw-r--r--vendor/github.com/google/go-github/github/activity_events.go59
-rw-r--r--vendor/github.com/google/go-github/github/activity_notifications.go57
-rw-r--r--vendor/github.com/google/go-github/github/activity_star.go27
-rw-r--r--vendor/github.com/google/go-github/github/activity_watching.go35
-rw-r--r--vendor/github.com/google/go-github/github/admin.go13
-rw-r--r--vendor/github.com/google/go-github/github/authorizations.go67
-rw-r--r--vendor/github.com/google/go-github/github/doc.go28
-rw-r--r--vendor/github.com/google/go-github/github/event_types.go129
-rw-r--r--vendor/github.com/google/go-github/github/gen-accessors.go299
-rw-r--r--vendor/github.com/google/go-github/github/gists.go65
-rw-r--r--vendor/github.com/google/go-github/github/gists_comments.go21
-rw-r--r--vendor/github.com/google/go-github/github/git_blobs.go13
-rw-r--r--vendor/github.com/google/go-github/github/git_commits.go42
-rw-r--r--vendor/github.com/google/go-github/github/git_refs.go21
-rw-r--r--vendor/github.com/google/go-github/github/git_tags.go9
-rw-r--r--vendor/github.com/google/go-github/github/git_trees.go13
-rw-r--r--vendor/github.com/google/go-github/github/github-accessors.go7277
-rw-r--r--vendor/github.com/google/go-github/github/github.go32
-rw-r--r--vendor/github.com/google/go-github/github/gitignore.go17
-rw-r--r--vendor/github.com/google/go-github/github/integration.go6
-rw-r--r--vendor/github.com/google/go-github/github/integration_installation.go7
-rw-r--r--vendor/github.com/google/go-github/github/issues.go39
-rw-r--r--vendor/github.com/google/go-github/github/issues_assignees.go21
-rw-r--r--vendor/github.com/google/go-github/github/issues_comments.go21
-rw-r--r--vendor/github.com/google/go-github/github/issues_events.go14
-rw-r--r--vendor/github.com/google/go-github/github/issues_labels.go50
-rw-r--r--vendor/github.com/google/go-github/github/issues_milestones.go29
-rw-r--r--vendor/github.com/google/go-github/github/issues_timeline.go5
-rw-r--r--vendor/github.com/google/go-github/github/licenses.go13
-rw-r--r--vendor/github.com/google/go-github/github/messages.go13
-rw-r--r--vendor/github.com/google/go-github/github/migrations.go25
-rw-r--r--vendor/github.com/google/go-github/github/migrations_source_import.go37
-rw-r--r--vendor/github.com/google/go-github/github/misc.go25
-rw-r--r--vendor/github.com/google/go-github/github/orgs.go17
-rw-r--r--vendor/github.com/google/go-github/github/orgs_hooks.go29
-rw-r--r--vendor/github.com/google/go-github/github/orgs_members.go57
-rw-r--r--vendor/github.com/google/go-github/github/orgs_outside_collaborators.go50
-rw-r--r--vendor/github.com/google/go-github/github/orgs_projects.go60
-rw-r--r--vendor/github.com/google/go-github/github/orgs_teams.go72
-rw-r--r--vendor/github.com/google/go-github/github/projects.go65
-rw-r--r--vendor/github.com/google/go-github/github/pulls.go139
-rw-r--r--vendor/github.com/google/go-github/github/pulls_comments.go21
-rw-r--r--vendor/github.com/google/go-github/github/pulls_reviewers.go84
-rw-r--r--vendor/github.com/google/go-github/github/pulls_reviews.go29
-rw-r--r--vendor/github.com/google/go-github/github/reactions.go41
-rw-r--r--vendor/github.com/google/go-github/github/repos.go120
-rw-r--r--vendor/github.com/google/go-github/github/repos_collaborators.go36
-rw-r--r--vendor/github.com/google/go-github/github/repos_comments.go25
-rw-r--r--vendor/github.com/google/go-github/github/repos_commits.go17
-rw-r--r--vendor/github.com/google/go-github/github/repos_contents.go34
-rw-r--r--vendor/github.com/google/go-github/github/repos_deployments.go25
-rw-r--r--vendor/github.com/google/go-github/github/repos_forks.go13
-rw-r--r--vendor/github.com/google/go-github/github/repos_hooks.go29
-rw-r--r--vendor/github.com/google/go-github/github/repos_invitations.go23
-rw-r--r--vendor/github.com/google/go-github/github/repos_keys.go25
-rw-r--r--vendor/github.com/google/go-github/github/repos_merging.go5
-rw-r--r--vendor/github.com/google/go-github/github/repos_pages.go25
-rw-r--r--vendor/github.com/google/go-github/github/repos_projects.go26
-rw-r--r--vendor/github.com/google/go-github/github/repos_releases.go76
-rw-r--r--vendor/github.com/google/go-github/github/repos_stats.go31
-rw-r--r--vendor/github.com/google/go-github/github/repos_statuses.go13
-rw-r--r--vendor/github.com/google/go-github/github/repos_traffic.go21
-rw-r--r--vendor/github.com/google/go-github/github/search.go51
-rw-r--r--vendor/github.com/google/go-github/github/users.go33
-rw-r--r--vendor/github.com/google/go-github/github/users_administration.go21
-rw-r--r--vendor/github.com/google/go-github/github/users_blocking.go91
-rw-r--r--vendor/github.com/google/go-github/github/users_emails.go14
-rw-r--r--vendor/github.com/google/go-github/github/users_followers.go25
-rw-r--r--vendor/github.com/google/go-github/github/users_gpg_keys.go37
-rw-r--r--vendor/github.com/google/go-github/github/users_keys.go21
-rw-r--r--vendor/github.com/google/go-github/github/without_appengine.go19
-rw-r--r--vendor/github.com/googleapis/gax-go/call_option.go13
-rw-r--r--vendor/github.com/googleapis/gax-go/header.go24
-rw-r--r--vendor/github.com/googleapis/gax-go/invoke.go4
-rw-r--r--vendor/github.com/gorilla/csrf/README.md2
-rw-r--r--vendor/github.com/gorilla/csrf/doc.go4
-rw-r--r--vendor/github.com/gorilla/handlers/README.md2
-rw-r--r--vendor/github.com/gorilla/mux/README.md1
-rw-r--r--vendor/github.com/gorilla/mux/route.go2
-rw-r--r--vendor/github.com/gorilla/securecookie/README.md2
-rw-r--r--vendor/github.com/gorilla/sessions/README.md9
-rw-r--r--vendor/github.com/hashicorp/vault/api/client.go24
-rw-r--r--vendor/github.com/hashicorp/vault/api/request.go1
-rw-r--r--vendor/github.com/hashicorp/vault/api/sys_mounts.go6
-rw-r--r--[-rwxr-xr-x]vendor/github.com/homemade/scl/scope.go0
-rw-r--r--vendor/github.com/homemade/scl/tokeniser.go10
-rw-r--r--vendor/github.com/jmoiron/sqlx/bind.go65
-rw-r--r--vendor/github.com/jmoiron/sqlx/named_context.go132
-rw-r--r--vendor/github.com/jmoiron/sqlx/reflectx/reflect.go2
-rw-r--r--vendor/github.com/jmoiron/sqlx/sqlx_context.go329
-rw-r--r--vendor/github.com/magiconair/properties/CHANGELOG.md10
-rw-r--r--vendor/github.com/magiconair/properties/README.md41
-rw-r--r--vendor/github.com/magiconair/properties/load.go9
-rw-r--r--vendor/github.com/magiconair/properties/properties.go31
-rw-r--r--vendor/github.com/mattn/go-sqlite3/README.md5
-rw-r--r--vendor/github.com/mattn/go-sqlite3/error.go1
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c6561
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h235
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3.go66
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3_context.go103
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go1
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go (renamed from vendor/github.com/mattn/go-sqlite3/tracecallback.go)2
-rw-r--r--vendor/github.com/mattn/go-sqlite3/sqlite3_vtable.go646
-rw-r--r--vendor/github.com/mitchellh/mapstructure/mapstructure.go30
-rw-r--r--vendor/github.com/nsheridan/autocert-wkfs-cache/cache.go2
-rw-r--r--vendor/github.com/pelletier/go-buffruneio/buffruneio.go25
-rw-r--r--vendor/github.com/pelletier/go-toml/lexer.go8
-rw-r--r--vendor/github.com/pelletier/go-toml/marshal.go479
-rw-r--r--vendor/github.com/pelletier/go-toml/marshal_test.toml38
-rwxr-xr-xvendor/github.com/pelletier/go-toml/test.sh7
-rw-r--r--vendor/github.com/pelletier/go-toml/token.go3
-rw-r--r--vendor/github.com/pelletier/go-toml/toml.go12
-rw-r--r--vendor/github.com/pelletier/go-toml/tomltree_conversions.go227
-rw-r--r--vendor/github.com/pelletier/go-toml/tomltree_create.go135
-rw-r--r--vendor/github.com/pelletier/go-toml/tomltree_write.go217
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/doc.go2
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/go_collector.go8
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/registry.go2
-rw-r--r--vendor/github.com/prometheus/client_golang/prometheus/value.go2
-rw-r--r--vendor/github.com/sethgrid/pester/main.go78
-rw-r--r--vendor/github.com/spf13/cast/Makefile38
-rw-r--r--vendor/github.com/spf13/cast/README.md3
-rw-r--r--vendor/github.com/spf13/cast/cast.go70
-rw-r--r--vendor/github.com/spf13/cast/caste.go735
-rw-r--r--vendor/github.com/spf13/pflag/flag.go61
-rw-r--r--vendor/github.com/spf13/viper/viper.go37
-rw-r--r--vendor/github.com/xanzy/go-gitlab/README.md7
-rw-r--r--vendor/github.com/xanzy/go-gitlab/branches.go73
-rw-r--r--vendor/github.com/xanzy/go-gitlab/build_variables.go26
-rw-r--r--vendor/github.com/xanzy/go-gitlab/builds.go28
-rw-r--r--vendor/github.com/xanzy/go-gitlab/commits.go176
-rw-r--r--vendor/github.com/xanzy/go-gitlab/deploy_keys.go19
-rw-r--r--vendor/github.com/xanzy/go-gitlab/events.go22
-rw-r--r--vendor/github.com/xanzy/go-gitlab/gitlab.go33
-rw-r--r--vendor/github.com/xanzy/go-gitlab/groups.go64
-rw-r--r--vendor/github.com/xanzy/go-gitlab/issues.go36
-rw-r--r--vendor/github.com/xanzy/go-gitlab/labels.go27
-rw-r--r--vendor/github.com/xanzy/go-gitlab/merge_requests.go28
-rw-r--r--vendor/github.com/xanzy/go-gitlab/milestones.go24
-rw-r--r--vendor/github.com/xanzy/go-gitlab/namespaces.go14
-rw-r--r--vendor/github.com/xanzy/go-gitlab/notes.go44
-rw-r--r--vendor/github.com/xanzy/go-gitlab/notifications.go19
-rw-r--r--vendor/github.com/xanzy/go-gitlab/pipelines.go22
-rw-r--r--vendor/github.com/xanzy/go-gitlab/project_snippets.go26
-rw-r--r--vendor/github.com/xanzy/go-gitlab/projects.go229
-rw-r--r--vendor/github.com/xanzy/go-gitlab/repositories.go32
-rw-r--r--vendor/github.com/xanzy/go-gitlab/repository_files.go25
-rw-r--r--vendor/github.com/xanzy/go-gitlab/services.go32
-rw-r--r--vendor/github.com/xanzy/go-gitlab/session.go12
-rw-r--r--vendor/github.com/xanzy/go-gitlab/settings.go12
-rw-r--r--vendor/github.com/xanzy/go-gitlab/system_hooks.go19
-rw-r--r--vendor/github.com/xanzy/go-gitlab/tags.go16
-rw-r--r--vendor/github.com/xanzy/go-gitlab/time_stats.go22
-rw-r--r--vendor/github.com/xanzy/go-gitlab/users.go88
206 files changed, 23454 insertions, 4894 deletions
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go
index d58b812..948e0a6 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go
@@ -22,9 +22,9 @@ type RequestRetryer interface{}
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // service clients.
-// sess, err := session.NewSession(&aws.Config{
+// sess := session.Must(session.NewSession(&aws.Config{
// MaxRetries: aws.Int(3),
-// })
+// }))
//
// // Create S3 service client with a specific Region.
// svc := s3.New(sess, &aws.Config{
@@ -154,7 +154,8 @@ type Config struct {
// the EC2Metadata overriding the timeout for default credentials chain.
//
// Example:
- // sess, err := session.NewSession(aws.NewConfig().WithEC2MetadataDiableTimeoutOverride(true))
+ // sess := session.Must(session.NewSession(aws.NewConfig()
+ // .WithEC2MetadataDiableTimeoutOverride(true)))
//
// svc := s3.New(sess)
//
@@ -174,7 +175,7 @@ type Config struct {
//
// Only supported with.
//
- // sess, err := session.NewSession()
+ // sess := session.Must(session.NewSession())
//
// svc := s3.New(sess, &aws.Config{
// UseDualStack: aws.Bool(true),
@@ -186,13 +187,19 @@ type Config struct {
// request delays. This value should only be used for testing. To adjust
// the delay of a request see the aws/client.DefaultRetryer and
// aws/request.Retryer.
+ //
+ // SleepDelay will prevent any Context from being used for canceling retry
+ // delay of an API operation. It is recommended to not use SleepDelay at all
+ // and specify a Retryer instead.
SleepDelay func(time.Duration)
// DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests.
// Will default to false. This would only be used for empty directory names in s3 requests.
//
// Example:
- // sess, err := session.NewSession(&aws.Config{DisableRestProtocolURICleaning: aws.Bool(true))
+ // sess := session.Must(session.NewSession(&aws.Config{
+ // DisableRestProtocolURICleaning: aws.Bool(true),
+ // }))
//
// svc := s3.New(sess)
// out, err := svc.GetObject(&s3.GetObjectInput {
@@ -207,9 +214,9 @@ type Config struct {
//
// // Create Session with MaxRetry configuration to be shared by multiple
// // service clients.
-// sess, err := session.NewSession(aws.NewConfig().
+// sess := session.Must(session.NewSession(aws.NewConfig().
// WithMaxRetries(3),
-// )
+// ))
//
// // Create S3 service client with a specific Region.
// svc := s3.New(sess, aws.NewConfig().
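The comment changes above move the examples to the session.Must pattern, which panics if session construction fails instead of returning an error. A minimal usage sketch of that pattern (the region value is a placeholder, not taken from this commit):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// session.Must wraps the (Session, error) return of NewSession and
	// panics on error, keeping initialization code concise.
	sess := session.Must(session.NewSession(&aws.Config{
		MaxRetries: aws.Int(3),
	}))

	// A service client can still override settings per client.
	svc := s3.New(sess, &aws.Config{Region: aws.String("us-west-2")})
	fmt.Println(aws.StringValue(svc.Config.Region))
}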
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context.go
new file mode 100644
index 0000000..79f4268
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context.go
@@ -0,0 +1,71 @@
+package aws
+
+import (
+ "time"
+)
+
+// Context is a copy of the Go v1.7 stdlib's context.Context interface.
+// It is represented as an SDK interface to enable you to use the "WithContext"
+// API methods with Go v1.6 and a Context type such as golang.org/x/net/context.
+//
+// See https://golang.org/pkg/context on how to use contexts.
+type Context interface {
+ // Deadline returns the time when work done on behalf of this context
+ // should be canceled. Deadline returns ok==false when no deadline is
+ // set. Successive calls to Deadline return the same results.
+ Deadline() (deadline time.Time, ok bool)
+
+ // Done returns a channel that's closed when work done on behalf of this
+ // context should be canceled. Done may return nil if this context can
+ // never be canceled. Successive calls to Done return the same value.
+ Done() <-chan struct{}
+
+ // Err returns a non-nil error value after Done is closed. Err returns
+ // Canceled if the context was canceled or DeadlineExceeded if the
+ // context's deadline passed. No other values for Err are defined.
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key returns the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ Value(key interface{}) interface{}
+}
+
+// BackgroundContext returns a context that will never be canceled, has no
+// values, and no deadline. This context is used by the SDK to provide
+// backwards compatibility with non-context API operations and functionality.
+//
+// Go 1.6 and before:
+// This context function is equivalent to context.Background in the Go stdlib.
+//
+// Go 1.7 and later:
+// The context returned will be the value returned by context.Background()
+//
+// See https://golang.org/pkg/context for more information on Contexts.
+func BackgroundContext() Context {
+ return backgroundCtx
+}
+
+// SleepWithContext will wait for the timer duration to expire, or for the context
+// to be canceled, whichever happens first. If the context is canceled, the Context's
+// error will be returned.
+//
+// Expects Context to always return a non-nil error if the Done channel is closed.
+func SleepWithContext(ctx Context, dur time.Duration) error {
+ t := time.NewTimer(dur)
+ defer t.Stop()
+
+ select {
+ case <-t.C:
+ break
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+
+ return nil
+}
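SleepWithContext and the aws.Context interface introduced above are the building blocks for request cancellation. A small sketch, assuming Go 1.7+ where the stdlib context.Context satisfies aws.Context directly (the durations are arbitrary):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// A stdlib context can be passed anywhere an aws.Context is expected.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	// The one-second sleep is cut short because the context's deadline
	// expires first; ctx.Err() is returned.
	if err := aws.SleepWithContext(ctx, time.Second); err != nil {
		fmt.Println("sleep interrupted:", err)
	}
}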
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
new file mode 100644
index 0000000..e8cf93d
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go
@@ -0,0 +1,41 @@
+// +build !go1.7
+
+package aws
+
+import "time"
+
+// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This
+// is copied to provide a 1.6 and 1.5 safe version of context that is compatible
+// with Go 1.7's Context.
+//
+// An emptyCtx is never canceled, has no values, and has no deadline. It is not
+// struct{}, since vars of this type must have distinct addresses.
+type emptyCtx int
+
+func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
+ return
+}
+
+func (*emptyCtx) Done() <-chan struct{} {
+ return nil
+}
+
+func (*emptyCtx) Err() error {
+ return nil
+}
+
+func (*emptyCtx) Value(key interface{}) interface{} {
+ return nil
+}
+
+func (e *emptyCtx) String() string {
+ switch e {
+ case backgroundCtx:
+ return "aws.BackgroundContext"
+ }
+ return "unknown empty Context"
+}
+
+var (
+ backgroundCtx = new(emptyCtx)
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go
new file mode 100644
index 0000000..064f75c
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go
@@ -0,0 +1,9 @@
+// +build go1.7
+
+package aws
+
+import "context"
+
+var (
+ backgroundCtx = context.Background()
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
index 8a7bafc..08a6665 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
@@ -134,6 +134,16 @@ var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *req
// Catch all other request errors.
r.Error = awserr.New("RequestError", "send request failed", err)
r.Retryable = aws.Bool(true) // network errors are retryable
+
+ // Override the error with a context canceled error, if that was canceled.
+ ctx := r.Context()
+ select {
+ case <-ctx.Done():
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", ctx.Err())
+ r.Retryable = aws.Bool(false)
+ default:
+ }
}
}}
@@ -156,7 +166,16 @@ var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn:
if r.WillRetry() {
r.RetryDelay = r.RetryRules(r)
- r.Config.SleepDelay(r.RetryDelay)
+
+ if sleepFn := r.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(r.RetryDelay)
+ } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil {
+ r.Error = awserr.New(request.CanceledErrorCode,
+ "request context canceled", err)
+ r.Retryable = aws.Bool(false)
+ return
+ }
// when the expired token exception occurs the credentials
// need to be expired locally so that the next request to
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
index 7b8ebf5..c29baf0 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
@@ -88,7 +88,7 @@ type Value struct {
// The Provider should not need to implement its own mutexes, because
// that will be managed by Credentials.
type Provider interface {
- // Refresh returns nil if it successfully retrieved the value.
+ // Retrieve returns nil if it successfully retrieved the value.
// Error is returned if the value were not obtainable, or empty.
Retrieve() (Value, error)
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
index 30c847a..b840623 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go
@@ -1,7 +1,81 @@
-// Package stscreds are credential Providers to retrieve STS AWS credentials.
-//
-// STS provides multiple ways to retrieve credentials which can be used when making
-// future AWS service API operation calls.
+/*
+Package stscreds are credential Providers to retrieve STS AWS credentials.
+
+STS provides multiple ways to retrieve credentials which can be used when making
+future AWS service API operation calls.
+
+The SDK will ensure that per instance of credentials.Credentials all requests
+to refresh the credentials will be synchronized. But, the SDK is unable to
+ensure synchronous usage of the AssumeRoleProvider if the value is shared
+between multiple Credentials, Sessions or service clients.
+
+Assume Role
+
+To assume an IAM role using STS with the SDK you can create a new Credentials
+with the SDKs's stscreds package.
+
+ // Initial credentials loaded from SDK's default credential chain. Such as
+ // the environment, shared credentials (~/.aws/credentials), or EC2 Instance
+ // Role. These credentials will be used to make the STS Assume Role API.
+ sess := session.Must(session.NewSession())
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN.
+ creds := stscreds.NewCredentials(sess, "myRoleArn")
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with static MFA Token
+
+To assume an IAM role with an MFA token you can either specify an MFA token code
+directly or provide a function to prompt the user each time the credentials
+need to refresh the role's credentials. Specifying the TokenCode should be used
+for short lived operations that will not need to be refreshed, and when you do
+not want to have direct control over how the user provides their MFA token.
+
+With TokenCode the AssumeRoleProvider will not be able to refresh the role's
+credentials.
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN using the MFA token code provided.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+ p.SerialNumber = aws.String("myTokenSerialNumber")
+ p.TokenCode = aws.String("00000000")
+ })
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+Assume Role with MFA Token Provider
+
+To assume an IAM role with MFA for longer running tasks where the credentials
+may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider
+will allow the credential provider to prompt for a new MFA token code when the
+role's credentials need to be refreshed.
+
+The StdinTokenProvider function is available to prompt on stdin to retrieve
+the MFA token code from the user. You can also implement custom prompts by
+satisfying the TokenProvider function signature.
+
+Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+have undesirable results as the StdinTokenProvider will not be synchronized. A
+single Credentials with an AssumeRoleProvider can be shared safely.
+
+ // Create the credentials from AssumeRoleProvider to assume the role
+ // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin.
+ creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) {
+ p.SerialNumber = aws.String("myTokenSerialNumber")
+ p.TokenProvider = stscreds.StdinTokenProvider
+ })
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess, &aws.Config{Credentials: creds})
+
+*/
package stscreds
import (
@@ -9,11 +83,31 @@ import (
"time"
"github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/service/sts"
)
+// StdinTokenProvider will prompt on stdout and read from stdin for a string value.
+// An error is returned if reading from stdin fails.
+//
+// Use this function to read MFA tokens from stdin. The function makes no attempt
+// to make atomic prompts from stdin across multiple goroutines.
+//
+// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will
+// have undesirable results as the StdinTokenProvider will not be synchronized. A
+// single Credentials with an AssumeRoleProvider can be shared safely
+//
+// Will wait forever until something is provided on the stdin.
+func StdinTokenProvider() (string, error) {
+ var v string
+ fmt.Printf("Assume Role MFA token code: ")
+ _, err := fmt.Scanln(&v)
+
+ return v, err
+}
+
// ProviderName provides a name of AssumeRole provider
const ProviderName = "AssumeRoleProvider"
@@ -27,8 +121,15 @@ type AssumeRoler interface {
var DefaultDuration = time.Duration(15) * time.Minute
// AssumeRoleProvider retrieves temporary credentials from the STS service, and
-// keeps track of their expiration time. This provider must be used explicitly,
-// as it is not included in the credentials chain.
+// keeps track of their expiration time.
+//
+// This credential provider will be used by the SDK's default credential chain
+// when shared configuration is enabled, and the shared config or shared credentials
+// file configure assume role. See Session docs for how to do this.
+//
+// AssumeRoleProvider does not provide any synchronization and it is not safe
+// to share this value across multiple Credentials, Sessions, or service clients
+// without also sharing the same Credentials instance.
type AssumeRoleProvider struct {
credentials.Expiry
@@ -65,8 +166,23 @@ type AssumeRoleProvider struct {
// assumed requires MFA (that is, if the policy includes a condition that tests
// for MFA). If the role being assumed requires MFA and if the TokenCode value
// is missing or expired, the AssumeRole call returns an "access denied" error.
+ //
+ // If SerialNumber is set and neither TokenCode nor TokenProvider are also
+ // set an error will be returned.
TokenCode *string
+ // Async method of providing MFA token code for assuming an IAM role with MFA.
+ // The value returned by the function will be used as the TokenCode in the Retrieve
+ // call. See StdinTokenProvider for a provider that prompts and reads from stdin.
+ //
+ // This token provider will be called whenever the assumed role's
+ // credentials need to be refreshed when SerialNumber is also set and
+ // TokenCode is not set.
+ //
+ // If both TokenCode and TokenProvider are set, TokenProvider will be used and
+ // TokenCode is ignored.
+ TokenProvider func() (string, error)
+
// ExpiryWindow will allow the credentials to trigger refreshing prior to
// the credentials actually expiring. This is beneficial so race conditions
// with expiring credentials do not cause request to fail unexpectedly
@@ -85,6 +201,10 @@ type AssumeRoleProvider struct {
//
// Takes a Config provider to create the STS client. The ConfigProvider is
// satisfied by the session.Session type.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
p := &AssumeRoleProvider{
Client: sts.New(c),
@@ -103,7 +223,11 @@ func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*As
// AssumeRoleProvider. The credentials will expire every 15 minutes and the
// role will be named after a nanosecond timestamp of this operation.
//
-// Takes an AssumeRoler which can be satisfiede by the STS client.
+// Takes an AssumeRoler which can be satisfied by the STS client.
+//
+// It is safe to share the returned Credentials with multiple Sessions and
+// service clients. All access to the credentials and refreshing them
+// will be synchronized.
func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials {
p := &AssumeRoleProvider{
Client: svc,
@@ -139,12 +263,25 @@ func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) {
if p.Policy != nil {
input.Policy = p.Policy
}
- if p.SerialNumber != nil && p.TokenCode != nil {
- input.SerialNumber = p.SerialNumber
- input.TokenCode = p.TokenCode
+ if p.SerialNumber != nil {
+ if p.TokenCode != nil {
+ input.SerialNumber = p.SerialNumber
+ input.TokenCode = p.TokenCode
+ } else if p.TokenProvider != nil {
+ input.SerialNumber = p.SerialNumber
+ code, err := p.TokenProvider()
+ if err != nil {
+ return credentials.Value{ProviderName: ProviderName}, err
+ }
+ input.TokenCode = aws.String(code)
+ } else {
+ return credentials.Value{ProviderName: ProviderName},
+ awserr.New("AssumeRoleTokenNotAvailable",
+ "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil)
+ }
}
- roleOutput, err := p.Client.AssumeRole(input)
+ roleOutput, err := p.Client.AssumeRole(input)
if err != nil {
return credentials.Value{ProviderName: ProviderName}, err
}
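The new TokenProvider field accepts any func() (string, error), so MFA codes can come from sources other than stdin. A hedged sketch of wiring in a custom provider; the role ARN, MFA serial, and token value are placeholders, not values from this repository:

package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/stscreds"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// fetchMFAToken stands in for any MFA source (prompt, hardware token bridge,
// secrets agent). It only has to match the TokenProvider signature.
func fetchMFAToken() (string, error) {
	return "123456", nil // placeholder value for illustration
}

func main() {
	sess := session.Must(session.NewSession())

	// TokenProvider is invoked each time the assumed role's credentials
	// need refreshing, so long-running processes keep working.
	creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example",
		func(p *stscreds.AssumeRoleProvider) {
			p.SerialNumber = aws.String("arn:aws:iam::123456789012:mfa/example")
			p.TokenProvider = fetchMFAToken
		})

	// Service clients built with these credentials assume the role per request.
	_ = s3.New(sess, &aws.Config{Credentials: creds})
}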
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
index 0ef5504..110ca83 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
@@ -56,7 +56,6 @@ func Config() *aws.Config {
WithMaxRetries(aws.UseServiceDefaultRetries).
WithLogger(aws.NewDefaultLogger()).
WithLogLevel(aws.LogOff).
- WithSleepDelay(time.Sleep).
WithEndpointResolver(endpoints.DefaultResolver())
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
index 5361656..4adca3a 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
@@ -1,4 +1,4 @@
-// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
package endpoints
@@ -104,8 +104,10 @@ const (
MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
MonitoringServiceID = "monitoring" // Monitoring.
+ MturkRequesterServiceID = "mturk-requester" // MturkRequester.
OpsworksServiceID = "opsworks" // Opsworks.
OpsworksCmServiceID = "opsworks-cm" // OpsworksCm.
+ OrganizationsServiceID = "organizations" // Organizations.
PinpointServiceID = "pinpoint" // Pinpoint.
PollyServiceID = "polly" // Polly.
RdsServiceID = "rds" // Rds.
@@ -129,8 +131,10 @@ const (
StsServiceID = "sts" // Sts.
SupportServiceID = "support" // Support.
SwfServiceID = "swf" // Swf.
+ TaggingServiceID = "tagging" // Tagging.
WafServiceID = "waf" // Waf.
WafRegionalServiceID = "waf-regional" // WafRegional.
+ WorkdocsServiceID = "workdocs" // Workdocs.
WorkspacesServiceID = "workspaces" // Workspaces.
XrayServiceID = "xray" // Xray.
)
@@ -246,6 +250,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
@@ -432,10 +437,14 @@ var awsPartition = partition{
"codebuild": service{
Endpoints: endpoints{
- "eu-west-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-2": endpoint{},
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"codecommit": service{
@@ -488,6 +497,7 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
@@ -501,6 +511,7 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
@@ -514,6 +525,7 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
@@ -749,10 +761,11 @@ var awsPartition = partition{
"elasticfilesystem": service{
Endpoints: endpoints{
- "eu-west-1": endpoint{},
- "us-east-1": endpoint{},
- "us-east-2": endpoint{},
- "us-west-2": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-2": endpoint{},
},
},
"elasticloadbalancing": service{
@@ -848,6 +861,7 @@ var awsPartition = partition{
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
@@ -958,6 +972,7 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
@@ -1014,6 +1029,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
@@ -1075,10 +1091,13 @@ var awsPartition = partition{
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
+ "us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
},
@@ -1110,6 +1129,16 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "mturk-requester": service{
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "sandbox": endpoint{
+ Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com",
+ },
+ "us-east-1": endpoint{},
+ },
+ },
"opsworks": service{
Endpoints: endpoints{
@@ -1136,6 +1165,19 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "organizations": service{
+ PartitionEndpoint: "aws-global",
+ IsRegionalized: boxedFalse,
+
+ Endpoints: endpoints{
+ "aws-global": endpoint{
+ Hostname: "organizations.us-east-1.amazonaws.com",
+ CredentialScope: credentialScope{
+ Region: "us-east-1",
+ },
+ },
+ },
+ },
"pinpoint": service{
Defaults: endpoint{
CredentialScope: credentialScope{
@@ -1346,7 +1388,6 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-south-1": endpoint{},
"ap-southeast-2": endpoint{},
- "ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
@@ -1421,6 +1462,7 @@ var awsPartition = partition{
Endpoints: endpoints{
"ap-northeast-1": endpoint{},
+ "eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
@@ -1532,6 +1574,25 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-northeast-2": endpoint{},
+ "ap-south-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "ca-central-1": endpoint{},
+ "eu-central-1": endpoint{},
+ "eu-west-1": endpoint{},
+ "eu-west-2": endpoint{},
+ "sa-east-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-east-2": endpoint{},
+ "us-west-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"waf": service{
PartitionEndpoint: "aws-global",
IsRegionalized: boxedFalse,
@@ -1554,6 +1615,17 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
+ "workdocs": service{
+
+ Endpoints: endpoints{
+ "ap-northeast-1": endpoint{},
+ "ap-southeast-1": endpoint{},
+ "ap-southeast-2": endpoint{},
+ "eu-west-1": endpoint{},
+ "us-east-1": endpoint{},
+ "us-west-2": endpoint{},
+ },
+ },
"workspaces": service{
Endpoints: endpoints{
@@ -1632,6 +1704,12 @@ var awscnPartition = partition{
"cn-north-1": endpoint{},
},
},
+ "codedeploy": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
"config": service{
Endpoints: endpoints{
@@ -1813,6 +1891,12 @@ var awscnPartition = partition{
"cn-north-1": endpoint{},
},
},
+ "tagging": service{
+
+ Endpoints: endpoints{
+ "cn-north-1": endpoint{},
+ },
+ },
},
}
@@ -1946,6 +2030,12 @@ var awsusgovPartition = partition{
},
},
},
+ "kinesis": service{
+
+ Endpoints: endpoints{
+ "us-gov-west-1": endpoint{},
+ },
+ },
"kms": service{
Endpoints: endpoints{
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
index 1e7369d..fc7eada 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go
@@ -158,7 +158,7 @@ var funcMap = template.FuncMap{
const v3Tmpl = `
{{ define "defaults" -}}
-// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT.
package endpoints
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
new file mode 100644
index 0000000..a94f041
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go
@@ -0,0 +1,11 @@
+package aws
+
+// JSONValue is a representation of a grab bag type that will be marshaled
+// into a json string. This type can be used just like any other map.
+//
+// Example:
+// values := JSONValue{
+// "Foo": "Bar",
+// }
+// values["Baz"] = "Qux"
+type JSONValue map[string]interface{}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
index 5279c19..6c14336 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go
@@ -18,6 +18,7 @@ type Handlers struct {
UnmarshalError HandlerList
Retry HandlerList
AfterRetry HandlerList
+ Complete HandlerList
}
// Copy returns of this handler's lists.
@@ -33,6 +34,7 @@ func (h *Handlers) Copy() Handlers {
UnmarshalMeta: h.UnmarshalMeta.copy(),
Retry: h.Retry.copy(),
AfterRetry: h.AfterRetry.copy(),
+ Complete: h.Complete.copy(),
}
}
@@ -48,6 +50,7 @@ func (h *Handlers) Clear() {
h.ValidateResponse.Clear()
h.Retry.Clear()
h.AfterRetry.Clear()
+ h.Complete.Clear()
}
// A HandlerListRunItem represents an entry in the HandlerList which
@@ -85,13 +88,17 @@ func (l *HandlerList) copy() HandlerList {
n := HandlerList{
AfterEachFn: l.AfterEachFn,
}
- n.list = append([]NamedHandler{}, l.list...)
+ if len(l.list) == 0 {
+ return n
+ }
+
+ n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...)
return n
}
// Clear clears the handler list.
func (l *HandlerList) Clear() {
- l.list = []NamedHandler{}
+ l.list = l.list[0:0]
}
// Len returns the number of handlers in the list.
@@ -101,33 +108,54 @@ func (l *HandlerList) Len() int {
// PushBack pushes handler f to the back of the handler list.
func (l *HandlerList) PushBack(f func(*Request)) {
- l.list = append(l.list, NamedHandler{"__anonymous", f})
-}
-
-// PushFront pushes handler f to the front of the handler list.
-func (l *HandlerList) PushFront(f func(*Request)) {
- l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...)
+ l.PushBackNamed(NamedHandler{"__anonymous", f})
}
// PushBackNamed pushes named handler f to the back of the handler list.
func (l *HandlerList) PushBackNamed(n NamedHandler) {
+ if cap(l.list) == 0 {
+ l.list = make([]NamedHandler, 0, 5)
+ }
l.list = append(l.list, n)
}
+// PushFront pushes handler f to the front of the handler list.
+func (l *HandlerList) PushFront(f func(*Request)) {
+ l.PushFrontNamed(NamedHandler{"__anonymous", f})
+}
+
// PushFrontNamed pushes named handler f to the front of the handler list.
func (l *HandlerList) PushFrontNamed(n NamedHandler) {
- l.list = append([]NamedHandler{n}, l.list...)
+ if cap(l.list) == len(l.list) {
+ // Allocating new list required
+ l.list = append([]NamedHandler{n}, l.list...)
+ } else {
+ // Enough room to prepend into list.
+ l.list = append(l.list, NamedHandler{})
+ copy(l.list[1:], l.list)
+ l.list[0] = n
+ }
}
// Remove removes a NamedHandler n
func (l *HandlerList) Remove(n NamedHandler) {
- newlist := []NamedHandler{}
- for _, m := range l.list {
- if m.Name != n.Name {
- newlist = append(newlist, m)
+ l.RemoveByName(n.Name)
+}
+
+// RemoveByName removes a NamedHandler by name.
+func (l *HandlerList) RemoveByName(name string) {
+ for i := 0; i < len(l.list); i++ {
+ m := l.list[i]
+ if m.Name == name {
+ // Shift array preventing creating new arrays
+ copy(l.list[i:], l.list[i+1:])
+ l.list[len(l.list)-1] = NamedHandler{}
+ l.list = l.list[:len(l.list)-1]
+
+ // decrement list so next check to length is correct
+ i--
}
}
- l.list = newlist
}
// Run executes all handlers in the list with a given request object.
@@ -163,6 +191,16 @@ func HandlerListStopOnError(item HandlerListRunItem) bool {
return item.Request.Error == nil
}
+// WithAppendUserAgent will add a string to the user agent prefixed with a
+// single white space.
+func WithAppendUserAgent(s string) Option {
+ return func(r *Request) {
+ r.Handlers.Build.PushBack(func(r2 *Request) {
+ AddToUserAgent(r, s)
+ })
+ }
+}
+
// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request
// header. If the extra parameters are provided they will be added as metadata to the
// name/version pair resulting in the following format.
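The handler-list changes above add removal by name (RemoveByName) and the WithAppendUserAgent request option. A sketch of both against an S3 client; the handler name and user-agent string are arbitrary examples:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	// Register a named handler on the client's Send list ...
	audit := request.NamedHandler{Name: "example.Audit", Fn: func(r *request.Request) {
		fmt.Println("sending", r.Operation.Name)
	}}
	svc.Handlers.Send.PushFrontNamed(audit)
	// ... and later drop it again by name alone.
	defer svc.Handlers.Send.RemoveByName("example.Audit")

	// WithAppendUserAgent registers a Build handler on a single request
	// that appends the given string to the User-Agent header.
	req, _ := svc.ListBucketsRequest(&s3.ListBucketsInput{})
	req.ApplyOptions(request.WithAppendUserAgent("example-app/1.0"))
	if err := req.Send(); err != nil {
		fmt.Println("list buckets failed:", err)
	}
}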
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
index 77312bb..1f131df 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go
@@ -16,6 +16,21 @@ import (
"github.com/aws/aws-sdk-go/aws/client/metadata"
)
+const (
+ // ErrCodeSerialization is the serialization error code that is received
+ // during protocol unmarshaling.
+ ErrCodeSerialization = "SerializationError"
+
+ // ErrCodeResponseTimeout is the connection timeout error that is received
+ // during body reads.
+ ErrCodeResponseTimeout = "ResponseTimeout"
+
+ // CanceledErrorCode is the error code that will be returned by an
+ // API request that was canceled. Requests given an aws.Context may
+ // return this error when canceled.
+ CanceledErrorCode = "RequestCanceled"
+)
+
// A Request is the service request to be made.
type Request struct {
Config aws.Config
@@ -41,12 +56,14 @@ type Request struct {
SignedHeaderVals http.Header
LastSignedAt time.Time
+ context aws.Context
+
built bool
- // Need to persist an intermideant body betweend the input Body and HTTP
+ // Need to persist an intermediate body between the input Body and HTTP
// request body because the HTTP Client's transport can maintain a reference
// to the HTTP request's body after the client has returned. This value is
- // safe to use concurrently and rewraps the input Body for each HTTP request.
+ // safe to use concurrently and wrap the input Body for each HTTP request.
safeBody *offsetReader
}
@@ -60,14 +77,6 @@ type Operation struct {
BeforePresignFn func(r *Request) error
}
-// Paginator keeps track of pagination configuration for an API operation.
-type Paginator struct {
- InputTokens []string
- OutputTokens []string
- LimitToken string
- TruncationToken string
-}
-
// New returns a new Request pointer for the service API
// operation and parameters.
//
@@ -111,6 +120,94 @@ func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers,
return r
}
+// A Option is a functional option that can augment or modify a request when
+// using a WithContext API operation method.
+type Option func(*Request)
+
+// WithGetResponseHeader builds a request Option which will retrieve a single
+// header value from the HTTP Response. If there are multiple values for the
+// header key use WithGetResponseHeaders instead to access the http.Header
+// map directly. The passed in val pointer must be non-nil.
+//
+// This Option can be used multiple times with a single API operation.
+//
+// var id2, versionID string
+// svc.PutObjectWithContext(ctx, params,
+// request.WithGetResponseHeader("x-amz-id-2", &id2),
+// request.WithGetResponseHeader("x-amz-version-id", &versionID),
+// )
+func WithGetResponseHeader(key string, val *string) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *val = req.HTTPResponse.Header.Get(key)
+ })
+ }
+}
+
+// WithGetResponseHeaders builds a request Option which will retrieve the
+// headers from the HTTP response and assign them to the passed in headers
+// variable. The passed in headers pointer must be non-nil.
+//
+// var headers http.Header
+// svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+ return func(r *Request) {
+ r.Handlers.Complete.PushBack(func(req *Request) {
+ *headers = req.HTTPResponse.Header
+ })
+ }
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+// svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody)
+func WithLogLevel(l aws.LogLevelType) Option {
+ return func(r *Request) {
+ r.Config.LogLevel = aws.LogLevel(l)
+ }
+}
+
+// ApplyOptions will apply each option to the request calling them in the order
+// they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+ for _, opt := range opts {
+ opt(r)
+ }
+}
+
+// Context will always return a non-nil context. If the Request does not have a
+// context, aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+ if r.context != nil {
+ return r.context
+ }
+ return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+ if ctx == nil {
+ panic("context cannot be nil")
+ }
+ setRequestContext(r, ctx)
+}
+
// WillRetry returns if the request's can be retried.
func (r *Request) WillRetry() bool {
return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
@@ -262,7 +359,7 @@ func (r *Request) ResetBody() {
// Related golang/go#18257
l, err := computeBodyLength(r.Body)
if err != nil {
- r.Error = awserr.New("SerializationError", "failed to compute request body size", err)
+ r.Error = awserr.New(ErrCodeSerialization, "failed to compute request body size", err)
return
}
@@ -344,6 +441,12 @@ func (r *Request) GetBody() io.ReadSeeker {
//
// Send will not close the request.Request's body.
func (r *Request) Send() error {
+ defer func() {
+ // Regardless of success or failure of the request trigger the Complete
+ // request handlers.
+ r.Handlers.Complete.Run(r)
+ }()
+
for {
if aws.BoolValue(r.Retryable) {
if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) {
@@ -446,6 +549,9 @@ func shouldRetryCancel(r *Request) bool {
timeoutErr := false
errStr := r.Error.Error()
if ok {
+ if awsErr.Code() == CanceledErrorCode {
+ return false
+ }
err := awsErr.OrigErr()
netErr, netOK := err.(net.Error)
timeoutErr = netOK && netErr.Temporary()
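SetContext, the Complete handler list, and the response-header options above combine so a single request can be canceled by a context and still report response metadata. A sketch assuming Go 1.7+; bucket, key, and body are placeholders:

package main

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := s3.New(sess)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	req, _ := svc.PutObjectRequest(&s3.PutObjectInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Key:    aws.String("example-key"),    // placeholder
		Body:   strings.NewReader("example"), // placeholder
	})

	// SetContext cancels the in-flight HTTP request and any retry delays
	// once ctx is done.
	req.SetContext(ctx)

	// WithGetResponseHeader captures a header via the new Complete handler
	// list, which runs after the request finishes.
	var requestID string
	req.ApplyOptions(request.WithGetResponseHeader("x-amz-request-id", &requestID))

	if err := req.Send(); err != nil {
		fmt.Println("put failed:", err)
		return
	}
	fmt.Println("request id:", requestID)
}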
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
new file mode 100644
index 0000000..a7365cd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
@@ -0,0 +1,14 @@
+// +build go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+ r.context = ctx
+ r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
new file mode 100644
index 0000000..307fa07
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+ r.context = ctx
+ r.HTTPRequest.Cancel = ctx.Done()
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
index 2939ec4..59de673 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
@@ -2,29 +2,125 @@ package request
import (
"reflect"
+ "sync/atomic"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
)
-//type Paginater interface {
-// HasNextPage() bool
-// NextPage() *Request
-// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error
-//}
+// A Pagination provides paginating of SDK API operations which are paginatable.
+// Generally you should not use this type directly, but use the "Pages" API
+// operations method to automatically perform pagination for you. Such as,
+// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods.
+//
+// Pagination differs from a Paginator type in that pagination is the type that
+// does the pagination between API operations, and Paginator defines the
+// configuration that will be used per page request.
+//
+// cont := true
+// for p.Next() && cont {
+// data := p.Page().(*s3.ListObjectsOutput)
+// // process the page's data
+// }
+// return p.Err()
+//
+// See service client API operation Pages methods for examples how the SDK will
+// use the Pagination type.
+type Pagination struct {
+ // Function to return a Request value for each pagination request.
+ // Any configuration or handlers that need to be applied to the request
+ // prior to getting the next page should be done here before the request
+ // returned.
+ //
+ // NewRequest should always be built from the same API operations. It is
+ // undefined if different API operations are returned on subsequent calls.
+ NewRequest func() (*Request, error)
-// HasNextPage returns true if this request has more pages of data available.
-func (r *Request) HasNextPage() bool {
- return len(r.nextPageTokens()) > 0
+ started bool
+ nextTokens []interface{}
+
+ err error
+ curPage interface{}
}
-// nextPageTokens returns the tokens to use when asking for the next page of
-// data.
+// HasNextPage will return true if Pagination is able to determine that the API
+// operation has additional pages. False will be returned if there are no more
+// pages remaining.
+//
+// Will always return true if Next has not been called yet.
+func (p *Pagination) HasNextPage() bool {
+ return !(p.started && len(p.nextTokens) == 0)
+}
+
+// Err returns the error Pagination encountered when retrieving the next page.
+func (p *Pagination) Err() error {
+ return p.err
+}
+
+// Page returns the current page. Page should only be called after a successful
+// call to Next. It is undefined what Page will return if Page is called after
+// Next returns false.
+func (p *Pagination) Page() interface{} {
+ return p.curPage
+}
+
+// Next will attempt to retrieve the next page for the API operation. When a page
+// is retrieved true will be returned. If the page cannot be retrieved, or there
+// are no more pages false will be returned.
+//
+// Use the Page method to retrieve the current page data. The data will need
+// to be cast to the API operation's output type.
+//
+// Use the Err method to determine if an error occurred if Page returns false.
+func (p *Pagination) Next() bool {
+ if !p.HasNextPage() {
+ return false
+ }
+
+ req, err := p.NewRequest()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ if p.started {
+ for i, intok := range req.Operation.InputTokens {
+ awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i])
+ }
+ }
+ p.started = true
+
+ err = req.Send()
+ if err != nil {
+ p.err = err
+ return false
+ }
+
+ p.nextTokens = req.nextPageTokens()
+ p.curPage = req.Data
+
+ return true
+}
+
+// A Paginator is the configuration data that defines how an API operation
+// should be paginated. This type is used by the API service models to define
+// the generated pagination config for service APIs.
+//
+// The Pagination type is what provides iterating between pages of an API;
+// the Paginator type only stores the token metadata the SDK should use for
+// performing pagination.
+type Paginator struct {
+ InputTokens []string
+ OutputTokens []string
+ LimitToken string
+ TruncationToken string
+}
+
+// nextPageTokens returns the tokens to use when asking for the next page of data.
func (r *Request) nextPageTokens() []interface{} {
if r.Operation.Paginator == nil {
return nil
}
-
if r.Operation.TruncationToken != "" {
tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken)
if len(tr) == 0 {
@@ -61,9 +157,40 @@ func (r *Request) nextPageTokens() []interface{} {
return tokens
}
+// Ensure a deprecated item is only logged once instead of each time it's used.
+func logDeprecatedf(logger aws.Logger, flag *int32, msg string) {
+ if logger == nil {
+ return
+ }
+ if atomic.CompareAndSwapInt32(flag, 0, 1) {
+ logger.Log(msg)
+ }
+}
+
+var (
+ logDeprecatedHasNextPage int32
+ logDeprecatedNextPage int32
+ logDeprecatedEachPage int32
+)
+
+// HasNextPage returns true if this request has more pages of data available.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
+func (r *Request) HasNextPage() bool {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage,
+ "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
+ return len(r.nextPageTokens()) > 0
+}
+
// NextPage returns a new Request that can be executed to return the next
// page of result data. Call .Send() on this request to execute it.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
func (r *Request) NextPage() *Request {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage,
+ "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations")
+
tokens := r.nextPageTokens()
if len(tokens) == 0 {
return nil
@@ -90,7 +217,12 @@ func (r *Request) NextPage() *Request {
// as the structure "T". The lastPage value represents whether the page is
// the last page of data or not. The return value of this function should
// return true to keep iterating or false to stop.
+//
+// Deprecated: Use the Pagination type for configurable pagination of API operations.
func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error {
+ logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage,
+ "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations")
+
for page := r; page != nil; page = page.NextPage() {
if err := page.Send(); err != nil {
return err
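The new Pagination type above can also be driven by hand. Below is a minimal sketch, assuming an S3 client value svc and that the aws, request, s3, and fmt packages are imported; the bucket name is a placeholder. The generated "Pages" methods do the equivalent for you.

    p := request.Pagination{
        NewRequest: func() (*request.Request, error) {
            req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{
                Bucket: aws.String("my-bucket"), // hypothetical bucket name
            })
            return req, nil
        },
    }

    for p.Next() {
        page := p.Page().(*s3.ListObjectsOutput)
        for _, obj := range page.Contents {
            fmt.Println("key:", aws.StringValue(obj.Key))
        }
    }
    if err := p.Err(); err != nil {
        // handle the pagination error
    }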
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
index ebd60cc..632cd70 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go
@@ -1,6 +1,9 @@
package request
import (
+ "net"
+ "os"
+ "syscall"
"time"
"github.com/aws/aws-sdk-go/aws"
@@ -26,8 +29,10 @@ func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config {
// retryableCodes is a collection of service response codes which are retry-able
// without any further action.
var retryableCodes = map[string]struct{}{
- "RequestError": {},
- "RequestTimeout": {},
+ "RequestError": {},
+ "RequestTimeout": {},
+ ErrCodeResponseTimeout: {},
+ "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout
}
var throttleCodes = map[string]struct{}{
@@ -68,12 +73,32 @@ func isCodeExpiredCreds(code string) bool {
return ok
}
+func isSerializationErrorRetryable(err error) bool {
+ if err == nil {
+ return false
+ }
+
+ if aerr, ok := err.(awserr.Error); ok {
+ return isCodeRetryable(aerr.Code())
+ }
+
+ if opErr, ok := err.(*net.OpError); ok {
+ if sysErr, ok := opErr.Err.(*os.SyscallError); ok {
+ return sysErr.Err == syscall.ECONNRESET
+ }
+ }
+
+ return false
+}
+
// IsErrorRetryable returns whether the error is retryable, based on its Code.
// Returns false if the request has no Error set.
func (r *Request) IsErrorRetryable() bool {
if r.Error != nil {
- if err, ok := r.Error.(awserr.Error); ok {
+ if err, ok := r.Error.(awserr.Error); ok && err.Code() != ErrCodeSerialization {
return isCodeRetryable(err.Code())
+ } else if ok {
+ return isSerializationErrorRetryable(err.OrigErr())
}
}
return false
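The retry changes above only classify which errors are retryable; how many attempts are made is still controlled by the Retryer installed on the config. Below is a minimal sketch of raising the retry count through request.WithRetryer, assuming the aws, client, request, and session packages are imported.

    cfg := request.WithRetryer(aws.NewConfig(), client.DefaultRetryer{NumMaxRetries: 10})
    sess := session.Must(session.NewSession(cfg))
    // service clients created from sess now retry requests up to 10 times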
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
new file mode 100644
index 0000000..09a44eb
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go
@@ -0,0 +1,94 @@
+package request
+
+import (
+ "io"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+var timeoutErr = awserr.New(
+ ErrCodeResponseTimeout,
+ "read on body has reached the timeout limit",
+ nil,
+)
+
+type readResult struct {
+ n int
+ err error
+}
+
+// timeoutReadCloser will handle body reads that take too long.
+// It returns an error with the ErrCodeResponseTimeout code if a read times out.
+type timeoutReadCloser struct {
+ reader io.ReadCloser
+ duration time.Duration
+}
+
+// Read will spin off a goroutine to call the reader's Read method. It then
+// selects on the timer's channel or the read's result channel; whichever
+// completes first is returned.
+func (r *timeoutReadCloser) Read(b []byte) (int, error) {
+ timer := time.NewTimer(r.duration)
+ c := make(chan readResult, 1)
+
+ go func() {
+ n, err := r.reader.Read(b)
+ timer.Stop()
+ c <- readResult{n: n, err: err}
+ }()
+
+ select {
+ case data := <-c:
+ return data.n, data.err
+ case <-timer.C:
+ return 0, timeoutErr
+ }
+}
+
+func (r *timeoutReadCloser) Close() error {
+ return r.reader.Close()
+}
+
+const (
+ // HandlerResponseTimeout is what we use to signify the name of the
+ // response timeout handler.
+ HandlerResponseTimeout = "ResponseTimeoutHandler"
+)
+
+// adaptToResponseTimeoutError is a handler that replaces the request's top level
+// error with its underlying error when that underlying error has the
+// ErrCodeResponseTimeout code.
+func adaptToResponseTimeoutError(req *Request) {
+ if err, ok := req.Error.(awserr.Error); ok {
+ aerr, ok := err.OrigErr().(awserr.Error)
+ if ok && aerr.Code() == ErrCodeResponseTimeout {
+ req.Error = aerr
+ }
+ }
+}
+
+// WithResponseReadTimeout is a request option that wraps the response body in a
+// timeout read closer. This allows for per-read timeouts. If a read times out, an
+// error with the ErrCodeResponseTimeout code is returned.
+//
+// svc.PutObjectWithContext(ctx, params, request.WithResponseReadTimeout(30 * time.Second))
+func WithResponseReadTimeout(duration time.Duration) Option {
+ return func(r *Request) {
+
+ var timeoutHandler = NamedHandler{
+ HandlerResponseTimeout,
+ func(req *Request) {
+ req.HTTPResponse.Body = &timeoutReadCloser{
+ reader: req.HTTPResponse.Body,
+ duration: duration,
+ }
+ }}
+
+ // remove the handler so we are not stomping over any new durations.
+ r.Handlers.Send.RemoveByName(HandlerResponseTimeout)
+ r.Handlers.Send.PushBackNamed(timeoutHandler)
+
+ r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError)
+ r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError)
+ }
+}
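A minimal usage sketch of the new read timeout option, assuming an S3 client value svc and a params value for PutObject; request.ErrCodeResponseTimeout is the code surfaced when a single body read exceeds the limit.

    ctx := aws.BackgroundContext()
    _, err := svc.PutObjectWithContext(ctx, params,
        request.WithResponseReadTimeout(30*time.Second))
    if err != nil {
        if aerr, ok := err.(awserr.Error); ok && aerr.Code() == request.ErrCodeResponseTimeout {
            // a single read on the response body exceeded 30 seconds
        }
    }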
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
new file mode 100644
index 0000000..354c381
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go
@@ -0,0 +1,293 @@
+package request
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/awserr"
+ "github.com/aws/aws-sdk-go/aws/awsutil"
+)
+
+// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when
+// the waiter's max attempts have been exhausted.
+const WaiterResourceNotReadyErrorCode = "ResourceNotReady"
+
+// A WaiterOption is a function that will update the Waiter value's fields to
+// configure the waiter.
+type WaiterOption func(*Waiter)
+
+// WithWaiterMaxAttempts returns a waiter option which sets the maximum number
+// of times the waiter should attempt to check the resource for the target state.
+func WithWaiterMaxAttempts(max int) WaiterOption {
+ return func(w *Waiter) {
+ w.MaxAttempts = max
+ }
+}
+
+// WaiterDelay will return a delay the waiter should pause between attempts to
+// check the resource state. The passed in attempt is the number of times the
+// Waiter has already checked the resource state.
+type WaiterDelay func(attempt int) time.Duration
+
+// ConstantWaiterDelay returns a WaiterDelay that will always return a constant
+// delay the waiter should use between attempts. It ignores the number of
+// attempts made.
+func ConstantWaiterDelay(delay time.Duration) WaiterDelay {
+ return func(attempt int) time.Duration {
+ return delay
+ }
+}
+
+// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in.
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption {
+ return func(w *Waiter) {
+ w.Delay = delayer
+ }
+}
+
+// WithWaiterLogger returns a waiter option to set the logger a waiter
+// should use to log warnings and errors to.
+func WithWaiterLogger(logger aws.Logger) WaiterOption {
+ return func(w *Waiter) {
+ w.Logger = logger
+ }
+}
+
+// WithWaiterRequestOptions returns a waiter option setting the request
+// options for each request the waiter makes. Appends to waiter's request
+// options already set.
+func WithWaiterRequestOptions(opts ...Option) WaiterOption {
+ return func(w *Waiter) {
+ w.RequestOptions = append(w.RequestOptions, opts...)
+ }
+}
+
+// A Waiter provides the functionality to perform a blocking call which will
+// wait for a resource state to be satisfied by a service.
+//
+// This type should not be used directly. The API operations provided in the
+// service packages prefixed with "WaitUntil" should be used instead.
+type Waiter struct {
+ Name string
+ Acceptors []WaiterAcceptor
+ Logger aws.Logger
+
+ MaxAttempts int
+ Delay WaiterDelay
+
+ RequestOptions []Option
+ NewRequest func([]Option) (*Request, error)
+}
+
+// ApplyOptions updates the waiter with the list of waiter options provided.
+func (w *Waiter) ApplyOptions(opts ...WaiterOption) {
+ for _, fn := range opts {
+ fn(w)
+ }
+}
+
+// WaiterState is a state the waiter uses, based on WaiterAcceptor definitions,
+// to identify if the resource state the waiter is waiting on has occurred.
+type WaiterState int
+
+// String returns the string representation of the waiter state.
+func (s WaiterState) String() string {
+ switch s {
+ case SuccessWaiterState:
+ return "success"
+ case FailureWaiterState:
+ return "failure"
+ case RetryWaiterState:
+ return "retry"
+ default:
+ return "unknown waiter state"
+ }
+}
+
+// States the waiter acceptors will use to identify target resource states.
+const (
+ SuccessWaiterState WaiterState = iota // waiter successful
+ FailureWaiterState // waiter failed
+ RetryWaiterState // waiter needs to be retried
+)
+
+// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor
+// definition's Expected attribute.
+type WaiterMatchMode int
+
+// Modes the waiter will use when inspecting API response to identify target
+// resource states.
+const (
+ PathAllWaiterMatch WaiterMatchMode = iota // match on all paths
+ PathWaiterMatch // match on specific path
+ PathAnyWaiterMatch // match on any path
+ PathListWaiterMatch // match on list of paths
+ StatusWaiterMatch // match on status code
+ ErrorWaiterMatch // match on error
+)
+
+// String returns the string representation of the waiter match mode.
+func (m WaiterMatchMode) String() string {
+ switch m {
+ case PathAllWaiterMatch:
+ return "pathAll"
+ case PathWaiterMatch:
+ return "path"
+ case PathAnyWaiterMatch:
+ return "pathAny"
+ case PathListWaiterMatch:
+ return "pathList"
+ case StatusWaiterMatch:
+ return "status"
+ case ErrorWaiterMatch:
+ return "error"
+ default:
+ return "unknown waiter match mode"
+ }
+}
+
+// WaitWithContext will make requests for the API operation using NewRequest to
+// build API requests. The request's response will be compared against the
+// Waiter's Acceptors to determine the successful state of the resource the
+// waiter is inspecting.
+//
+// The passed in context must not be nil. If it is nil a panic will occur. The
+// Context will be used to cancel the waiter's pending requests and retry delays.
+// Use aws.BackgroundContext if no context is available.
+//
+// The waiter will continue until the target state defined by the Acceptors is
+// reached, or the max attempts are exhausted.
+//
+// Will return an error with the WaiterResourceNotReadyErrorCode code if the
+// waiter's attempts are exhausted before the target state is reached.
+func (w Waiter) WaitWithContext(ctx aws.Context) error {
+
+ for attempt := 1; ; attempt++ {
+ req, err := w.NewRequest(w.RequestOptions)
+ if err != nil {
+ waiterLogf(w.Logger, "unable to create request %v", err)
+ return err
+ }
+ req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter"))
+ err = req.Send()
+
+ // See if any of the acceptors match the request's response, or error
+ for _, a := range w.Acceptors {
+ var matched bool
+ matched, err = a.match(w.Name, w.Logger, req, err)
+ if err != nil {
+ // Error occurred during current waiter call
+ return err
+ } else if matched {
+ // Match was found can stop here and return
+ return nil
+ }
+ }
+
+ // The Waiter should only check the resource state MaxAttempts times.
+ // This is here instead of in the for loop above to prevent delaying
+ // unnecessarily when the waiter will not retry.
+ if attempt == w.MaxAttempts {
+ break
+ }
+
+ // Delay to wait before inspecting the resource again
+ delay := w.Delay(attempt)
+ if sleepFn := req.Config.SleepDelay; sleepFn != nil {
+ // Support SleepDelay for backwards compatibility and testing
+ sleepFn(delay)
+ } else if err := aws.SleepWithContext(ctx, delay); err != nil {
+ return awserr.New(CanceledErrorCode, "waiter context canceled", err)
+ }
+ }
+
+ return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil)
+}
+
+// A WaiterAcceptor provides the information needed to wait for an API operation
+// to complete.
+type WaiterAcceptor struct {
+ State WaiterState
+ Matcher WaiterMatchMode
+ Argument string
+ Expected interface{}
+}
+
+// match returns if the acceptor found a match with the passed in request
+// or error. True is returned if the acceptor made a match, error is returned
+// if there was an error attempting to perform the match.
+func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) {
+ result := false
+ var vals []interface{}
+
+ switch a.Matcher {
+ case PathAllWaiterMatch, PathWaiterMatch:
+ // Require all matches to be equal for result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ if len(vals) == 0 {
+ break
+ }
+ result = true
+ for _, val := range vals {
+ if !awsutil.DeepEqual(val, a.Expected) {
+ result = false
+ break
+ }
+ }
+ case PathAnyWaiterMatch:
+ // Only a single match needs to equal for the result to match
+ vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
+ for _, val := range vals {
+ if awsutil.DeepEqual(val, a.Expected) {
+ result = true
+ break
+ }
+ }
+ case PathListWaiterMatch:
+ // ignored matcher
+ case StatusWaiterMatch:
+ s := a.Expected.(int)
+ result = s == req.HTTPResponse.StatusCode
+ case ErrorWaiterMatch:
+ if aerr, ok := err.(awserr.Error); ok {
+ result = aerr.Code() == a.Expected.(string)
+ }
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s",
+ name, a.Matcher)
+ }
+
+ if !result {
+ // If there was no matching result found there is nothing more to do
+ // for this response, retry the request.
+ return false, nil
+ }
+
+ switch a.State {
+ case SuccessWaiterState:
+ // waiter completed
+ return true, nil
+ case FailureWaiterState:
+ // Waiter failure state triggered
+ return false, awserr.New("ResourceNotReady",
+ "failed waiting for successful resource state", err)
+ case RetryWaiterState:
+ // clear the error and retry the operation
+ return false, nil
+ default:
+ waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s",
+ name, a.State)
+ return false, nil
+ }
+}
+
+func waiterLogf(logger aws.Logger, msg string, args ...interface{}) {
+ if logger != nil {
+ logger.Log(fmt.Sprintf(msg, args...))
+ }
+}
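The generated "WaitUntil" service methods build on this Waiter type. Below is a minimal sketch, assuming an S3 client value svc with the bucket name as a placeholder, of waiting with a context and tuning the waiter through the new options.

    ctx := aws.BackgroundContext()
    err := svc.WaitUntilBucketExistsWithContext(ctx,
        &s3.HeadBucketInput{Bucket: aws.String("my-bucket")}, // hypothetical bucket
        request.WithWaiterMaxAttempts(10),
        request.WithWaiterDelay(request.ConstantWaiterDelay(5*time.Second)),
    )
    if err != nil {
        // the bucket did not reach the expected state before the attempts ran out
    }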
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
index d3dc840..2fe35e7 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go
@@ -23,7 +23,7 @@ additional config if the AWS_SDK_LOAD_CONFIG environment variable is set.
Alternatively you can explicitly create a Session with shared config enabled.
To do this you can use NewSessionWithOptions to configure how the Session will
be created. Using the NewSessionWithOptions with SharedConfigState set to
-SharedConfigEnabled will create the session as if the AWS_SDK_LOAD_CONFIG
+SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG
environment variable was set.
Creating Sessions
@@ -45,16 +45,16 @@ region, and profile loaded from the environment and shared config automatically.
Requires the AWS_PROFILE to be set, or "default" is used.
// Create Session
- sess, err := session.NewSession()
+ sess := session.Must(session.NewSession())
// Create a Session with a custom region
- sess, err := session.NewSession(&aws.Config{Region: aws.String("us-east-1")})
+ sess := session.Must(session.NewSession(&aws.Config{
+ Region: aws.String("us-east-1"),
+ }))
// Create a S3 client instance from a session
- sess, err := session.NewSession()
- if err != nil {
- // Handle Session creation error
- }
+ sess := session.Must(session.NewSession())
+
svc := s3.New(sess)
Create Session With Option Overrides
@@ -67,23 +67,25 @@ Use NewSessionWithOptions when you want to provide the config profile, or
override the shared config state (AWS_SDK_LOAD_CONFIG).
// Equivalent to session.NewSession()
- sess, err := session.NewSessionWithOptions(session.Options{})
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ // Options
+ }))
// Specify profile to load for the session's config
- sess, err := session.NewSessionWithOptions(session.Options{
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
Profile: "profile_name",
- })
+ }))
// Specify profile for config and region for requests
- sess, err := session.NewSessionWithOptions(session.Options{
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
Config: aws.Config{Region: aws.String("us-east-1")},
Profile: "profile_name",
- })
+ }))
// Force enable Shared Config support
- sess, err := session.NewSessionWithOptions(session.Options{
- SharedConfigState: SharedConfigEnable,
- })
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ SharedConfigState: session.SharedConfigEnable,
+ }))
Adding Handlers
@@ -93,7 +95,8 @@ handler logs every request and its payload made by a service client:
// Create a session, and add additional handlers for all service
// clients created with the Session to inherit. Adds logging handler.
- sess, err := session.NewSession()
+ sess := session.Must(session.NewSession())
+
sess.Handlers.Send.PushFront(func(r *request.Request) {
// Log every request made and its payload
logger.Println("Request: %s/%s, Payload: %s",
@@ -138,15 +141,14 @@ the other two fields are also provided.
Assume Role values allow you to configure the SDK to assume an IAM role using
a set of credentials provided in a config file via the source_profile field.
-Both "role_arn" and "source_profile" are required. The SDK does not support
-assuming a role with MFA token Via the Session's constructor. You can use the
-stscreds.AssumeRoleProvider credentials provider to specify custom
-configuration and support for MFA.
+Both "role_arn" and "source_profile" are required. The SDK supports assuming
+a role with MFA token if the session option AssumeRoleTokenProvider
+is set.
role_arn = arn:aws:iam::<account_number>:role/<role_name>
source_profile = profile_with_creds
external_id = 1234
- mfa_serial = not supported!
+ mfa_serial = <serial or mfa arn>
role_session_name = session_name
Region is the region the SDK should use for looking up AWS service endpoints
@@ -154,6 +156,37 @@ and signing requests.
region = us-east-1
+Assume Role with MFA token
+
+To create a session with support for assuming an IAM role with MFA set the
+session option AssumeRoleTokenProvider to a function that will prompt for the
+MFA token code when the SDK assumes the role and refreshes the role's credentials.
+This allows you to configure the SDK via the shared config to assume a role
+with MFA tokens.
+
+In order for the SDK to assume a role with MFA, the SharedConfigState
+session option must be set to SharedConfigEnable, or the AWS_SDK_LOAD_CONFIG
+environment variable must be set.
+
+The shared configuration instructs the SDK to assume an IAM role with MFA
+when the mfa_serial configuration field is set in the shared config
+(~/.aws/config) or shared credentials (~/.aws/credentials) file.
+
+If mfa_serial is set in the configuration but the AssumeRoleTokenProvider
+session option is not set, an error will be returned when creating the
+session.
+
+ sess := session.Must(session.NewSessionWithOptions(session.Options{
+ AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+ }))
+
+ // Create service client value configured for credentials
+ // from assumed role.
+ svc := s3.New(sess)
+
+To set up assume role outside of a session see the stscreds.AssumeRoleProvider
+documentation.
+
Environment Variables
When a Session is created several environment variables can be set to adjust
@@ -218,6 +251,24 @@ $HOME/.aws/config on Linux/Unix based systems, and
AWS_CONFIG_FILE=$HOME/my_shared_config
+Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
+will use instead of the system's default root CA bundle. Use this only
+if you want to replace the CA bundle the SDK uses for TLS requests.
+
+ AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+
+Enabling this option will attempt to merge the CA bundle into the SDK HTTP
+client's Transport. If the client's Transport is not an http.Transport an error
+will be returned. If the Transport's TLS config is set this option will cause
+the SDK to overwrite the Transport's TLS config's RootCAs value. If the CA
+bundle file contains multiple certificates all of them will be loaded.
+
+The CustomCABundle session option is also available when creating sessions
+to enable this feature. The CustomCABundle session option field has priority
+over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+Setting a custom HTTPClient in the aws.Config options will override this setting.
+To use this option with a custom HTTP client, the HTTP client needs to be
+provided when creating the session, not the service client.
*/
package session
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index d2f0c84..e6278a7 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -75,6 +75,24 @@ type envConfig struct {
//
// AWS_CONFIG_FILE=$HOME/my_shared_config
SharedConfigFile string
+
+ // Sets the path to a custom Certificate Authority (CA) bundle PEM file
+ // that the SDK will use instead of the system's root CA bundle.
+ // Only use this if you want to configure the SDK to use a custom set
+ // of CAs.
+ //
+ // Enabling this option will attempt to merge the Transport
+ // into the SDK's HTTP client. If the client's Transport is
+ // not a http.Transport an error will be returned. If the
+ // Transport's TLS config is set this option will cause the
+ // SDK to overwrite the Transport's TLS config's RootCAs value.
+ //
+ // Setting a custom HTTPClient in the aws.Config options will override this setting.
+ // To use this option and custom HTTP client, the HTTP client needs to be provided
+ // when creating the session. Not the service client.
+ //
+ // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
+ CustomCABundle string
}
var (
@@ -150,6 +168,8 @@ func envConfigLoad(enableSharedConfig bool) envConfig {
cfg.SharedCredentialsFile = sharedCredentialsFilename()
cfg.SharedConfigFile = sharedConfigFilename()
+ cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
+
return cfg
}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index 3d52fc2..96c740d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -1,7 +1,13 @@
package session
import (
+ "crypto/tls"
+ "crypto/x509"
"fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "os"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
@@ -52,7 +58,7 @@ func New(cfgs ...*aws.Config) *Session {
envCfg := loadEnvConfig()
if envCfg.EnableSharedConfig {
- s, err := newSession(envCfg, cfgs...)
+ s, err := newSession(Options{}, envCfg, cfgs...)
if err != nil {
// Old session.New expected all errors to be discovered when
// a request is made, and would report the errors then. This
@@ -73,7 +79,7 @@ func New(cfgs ...*aws.Config) *Session {
return s
}
- return oldNewSession(cfgs...)
+ return deprecatedNewSession(cfgs...)
}
// NewSession returns a new Session created from SDK defaults, config files,
@@ -92,9 +98,10 @@ func New(cfgs ...*aws.Config) *Session {
// control through code how the Session will be created. Such as specifying the
// config profile, and controlling if shared config is enabled or not.
func NewSession(cfgs ...*aws.Config) (*Session, error) {
- envCfg := loadEnvConfig()
+ opts := Options{}
+ opts.Config.MergeIn(cfgs...)
- return newSession(envCfg, cfgs...)
+ return NewSessionWithOptions(opts)
}
// SharedConfigState provides the ability to optionally override the state
@@ -147,6 +154,41 @@ type Options struct {
// will allow you to override the AWS_SDK_LOAD_CONFIG environment variable
// and enable or disable the shared config functionality.
SharedConfigState SharedConfigState
+
+ // When the SDK's shared config is configured to assume a role with MFA
+ // this option is required in order to provide the mechanism that will
+ // retrieve the MFA token. There is no default value for this field. If
+ // it is not set an error will be returned when creating the session.
+ //
+ // This token provider will be called whenever the assumed role's
+ // credentials need to be refreshed. Within the context of service clients
+ // all sharing the same session the SDK will ensure calls to the token
+ // provider are atomic. When sharing a token provider across multiple
+ // sessions additional synchronization logic is needed to ensure the
+ // token providers do not introduce race conditions. It is recommended to
+ // share the session where possible.
+ //
+ // stscreds.StdinTokenProvider is a basic implementation that will prompt
+ // from stdin for the MFA token code.
+ //
+ // This field is only used if the shared configuration is enabled, and
+ // the config enables assume role with MFA via the mfa_serial field.
+ AssumeRoleTokenProvider func() (string, error)
+
+ // Reader for a custom Certificate Authority (CA) bundle in PEM format that
+ // the SDK will use instead of the default system's root CA bundle. Use this
+ // only if you want to replace the CA bundle the SDK uses for TLS requests.
+ //
+ // Enabling this option will attempt to merge the Transport into the SDK's HTTP
+ // client. If the client's Transport is not a http.Transport an error will be
+ // returned. If the Transport's TLS config is set this option will cause the SDK
+ // to overwrite the Transport's TLS config's RootCAs value. If the CA
+ // bundle reader contains multiple certificates all of them will be loaded.
+ //
+ // The Session option CustomCABundle is also available when creating sessions
+ // to also enable this feature. CustomCABundle session option field has priority
+ // over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+ CustomCABundle io.Reader
}
// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
@@ -161,23 +203,23 @@ type Options struct {
// to be built with retrieving credentials with AssumeRole set in the config.
//
// // Equivalent to session.New
-// sess, err := session.NewSessionWithOptions(session.Options{})
+// sess := session.Must(session.NewSessionWithOptions(session.Options{}))
//
// // Specify profile to load for the session's config
-// sess, err := session.NewSessionWithOptions(session.Options{
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
// Profile: "profile_name",
-// })
+// }))
//
// // Specify profile for config and region for requests
-// sess, err := session.NewSessionWithOptions(session.Options{
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
// Config: aws.Config{Region: aws.String("us-east-1")},
// Profile: "profile_name",
-// })
+// }))
//
// // Force enable Shared Config support
-// sess, err := session.NewSessionWithOptions(session.Options{
+// sess := session.Must(session.NewSessionWithOptions(session.Options{
// SharedConfigState: SharedConfigEnable,
-// })
+// }))
func NewSessionWithOptions(opts Options) (*Session, error) {
var envCfg envConfig
if opts.SharedConfigState == SharedConfigEnable {
@@ -197,7 +239,18 @@ func NewSessionWithOptions(opts Options) (*Session, error) {
envCfg.EnableSharedConfig = true
}
- return newSession(envCfg, &opts.Config)
+ // Only use AWS_CA_BUNDLE if session option is not provided.
+ if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
+ f, err := os.Open(envCfg.CustomCABundle)
+ if err != nil {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to open custom CA bundle PEM file", err)
+ }
+ defer f.Close()
+ opts.CustomCABundle = f
+ }
+
+ return newSession(opts, envCfg, &opts.Config)
}
// Must is a helper function to ensure the Session is valid and there was no
@@ -215,7 +268,7 @@ func Must(sess *Session, err error) *Session {
return sess
}
-func oldNewSession(cfgs ...*aws.Config) *Session {
+func deprecatedNewSession(cfgs ...*aws.Config) *Session {
cfg := defaults.Config()
handlers := defaults.Handlers()
@@ -242,7 +295,7 @@ func oldNewSession(cfgs ...*aws.Config) *Session {
return s
}
-func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
+func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
cfg := defaults.Config()
handlers := defaults.Handlers()
@@ -266,7 +319,9 @@ func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
return nil, err
}
- mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers)
+ if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil {
+ return nil, err
+ }
s := &Session{
Config: cfg,
@@ -275,10 +330,62 @@ func newSession(envCfg envConfig, cfgs ...*aws.Config) (*Session, error) {
initHandlers(s)
+ // Setup HTTP client with custom cert bundle if enabled
+ if opts.CustomCABundle != nil {
+ if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
+ return nil, err
+ }
+ }
+
return s, nil
}
-func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers) {
+func loadCustomCABundle(s *Session, bundle io.Reader) error {
+ var t *http.Transport
+ switch v := s.Config.HTTPClient.Transport.(type) {
+ case *http.Transport:
+ t = v
+ default:
+ if s.Config.HTTPClient.Transport != nil {
+ return awserr.New("LoadCustomCABundleError",
+ "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
+ }
+ }
+ if t == nil {
+ t = &http.Transport{}
+ }
+
+ p, err := loadCertPool(bundle)
+ if err != nil {
+ return err
+ }
+ if t.TLSClientConfig == nil {
+ t.TLSClientConfig = &tls.Config{}
+ }
+ t.TLSClientConfig.RootCAs = p
+
+ s.Config.HTTPClient.Transport = t
+
+ return nil
+}
+
+func loadCertPool(r io.Reader) (*x509.CertPool, error) {
+ b, err := ioutil.ReadAll(r)
+ if err != nil {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to read custom CA bundle PEM file", err)
+ }
+
+ p := x509.NewCertPool()
+ if !p.AppendCertsFromPEM(b) {
+ return nil, awserr.New("LoadCustomCABundleError",
+ "failed to load custom CA bundle PEM file", err)
+ }
+
+ return p, nil
+}
+
+func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error {
// Merge in user provided configuration
cfg.MergeIn(userCfg)
@@ -302,6 +409,11 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds(
sharedCfg.AssumeRoleSource.Creds,
)
+ if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil {
+ // AssumeRole Token provider is required if doing Assume Role
+ // with MFA.
+ return AssumeRoleTokenProviderNotSetError{}
+ }
cfg.Credentials = stscreds.NewCredentials(
&Session{
Config: &cfgCp,
@@ -311,11 +423,16 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
func(opt *stscreds.AssumeRoleProvider) {
opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName
+ // Assume role with external ID
if len(sharedCfg.AssumeRole.ExternalID) > 0 {
opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID)
}
- // MFA not supported
+ // Assume role with MFA
+ if len(sharedCfg.AssumeRole.MFASerial) > 0 {
+ opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial)
+ opt.TokenProvider = sessOpts.AssumeRoleTokenProvider
+ }
},
)
} else if len(sharedCfg.Creds.AccessKeyID) > 0 {
@@ -336,6 +453,33 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg share
})
}
}
+
+ return nil
+}
+
+// AssumeRoleTokenProviderNotSetError is an error returned when creating a
+// session when the AssumeRoleTokenProvider option is not set and the shared
+// config is configured to assume a role with an MFA token.
+type AssumeRoleTokenProviderNotSetError struct{}
+
+// Code is the short id of the error.
+func (e AssumeRoleTokenProviderNotSetError) Code() string {
+ return "AssumeRoleTokenProviderNotSetError"
+}
+
+// Message is the description of the error
+func (e AssumeRoleTokenProviderNotSetError) Message() string {
+ return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.")
+}
+
+// OrigErr is the underlying error that caused the failure.
+func (e AssumeRoleTokenProviderNotSetError) OrigErr() error {
+ return nil
+}
+
+// Error satisfies the error interface.
+func (e AssumeRoleTokenProviderNotSetError) Error() string {
+ return awserr.SprintError(e.Code(), e.Message(), "", nil)
}
type credProviderError struct {
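A minimal sketch combining the two new session options, assuming the os, session, stscreds, and s3 packages are imported; the CA bundle path is a placeholder.

    f, err := os.Open("/path/to/custom_ca_bundle.pem") // hypothetical path
    if err != nil {
        // handle the open error
    }
    defer f.Close()

    sess := session.Must(session.NewSessionWithOptions(session.Options{
        SharedConfigState:       session.SharedConfigEnable,
        AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
        CustomCABundle:          f,
    }))

    // Service clients created from sess use the custom CA bundle for TLS.
    svc := s3.New(sess)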
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
new file mode 100644
index 0000000..6aa2ed2
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go
@@ -0,0 +1,7 @@
+package v4
+
+// WithUnsignedPayload is a signer option that sets the signer's
+// UnsignedPayload field to true.
+func WithUnsignedPayload(v4 *Signer) {
+ v4.UnsignedPayload = true
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
index 98bfe74..434ac87 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go
@@ -194,6 +194,10 @@ type Signer struct {
// This value should only be used for testing. If it is nil the default
// time.Now will be used.
currentTimeFn func() time.Time
+
+ // UnsignedPayload will prevent signing of the payload. This will only
+ // work for services that have support for this.
+ UnsignedPayload bool
}
// NewSigner returns a Signer pointer configured with the credentials and optional
@@ -227,6 +231,7 @@ type signingCtx struct {
isPresign bool
formattedTime string
formattedShortTime string
+ unsignedPayload bool
bodyDigest string
signedHeaders string
@@ -317,6 +322,7 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
ServiceName: service,
Region: region,
DisableURIPathEscaping: v4.DisableURIPathEscaping,
+ unsignedPayload: v4.UnsignedPayload,
}
for key := range ctx.Query {
@@ -409,7 +415,18 @@ var SignRequestHandler = request.NamedHandler{
func SignSDKRequest(req *request.Request) {
signSDKRequestWithCurrTime(req, time.Now)
}
-func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time) {
+
+// BuildNamedHandler will build a generic handler for signing.
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler {
+ return request.NamedHandler{
+ Name: name,
+ Fn: func(req *request.Request) {
+ signSDKRequestWithCurrTime(req, time.Now, opts...)
+ },
+ }
+}
+
+func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) {
// If the request does not need to be signed ignore the signing of the
// request if the AnonymousCredentials object is used.
if req.Config.Credentials == credentials.AnonymousCredentials {
@@ -441,6 +458,10 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time
v4.DisableRequestBodyOverwrite = true
})
+ for _, opt := range opts {
+ opt(v4)
+ }
+
signingTime := req.Time
if !req.LastSignedAt.IsZero() {
signingTime = req.LastSignedAt
@@ -634,14 +655,14 @@ func (ctx *signingCtx) buildSignature() {
func (ctx *signingCtx) buildBodyDigest() {
hash := ctx.Request.Header.Get("X-Amz-Content-Sha256")
if hash == "" {
- if ctx.isPresign && ctx.ServiceName == "s3" {
+ if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") {
hash = "UNSIGNED-PAYLOAD"
} else if ctx.Body == nil {
hash = emptyStringSHA256
} else {
hash = hex.EncodeToString(makeSha256Reader(ctx.Body))
}
- if ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
+ if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" {
ctx.Request.Header.Set("X-Amz-Content-Sha256", hash)
}
}
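A minimal sketch of the new BuildNamedHandler and WithUnsignedPayload hooks, assuming a service client value svc whose service accepts unsigned payloads: the default sign handler is replaced with one that signs requests using an UNSIGNED-PAYLOAD body digest.

    handler := v4.BuildNamedHandler(v4.SignRequestHandler.Name, v4.WithUnsignedPayload)
    svc.Handlers.Sign.RemoveByName(v4.SignRequestHandler.Name)
    svc.Handlers.Sign.PushBackNamed(handler)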
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go
index be7ac14..d1b587d 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/version.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
-const SDKVersion = "1.6.25"
+const SDKVersion = "1.8.11"
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
index f434ab7..524ca95 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go
@@ -80,7 +80,6 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri
continue
}
-
if protocol.CanSetIdempotencyToken(value.Field(i), field) {
token := protocol.GetIdempotencyToken()
elemValue = reflect.ValueOf(token)
@@ -124,7 +123,11 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string
// check for unflattened list member
if !q.isEC2 && tag.Get("flattened") == "" {
- prefix += ".member"
+ if listName := tag.Get("locationNameList"); listName == "" {
+ prefix += ".member"
+ } else {
+ prefix += "." + listName
+ }
}
for i := 0; i < value.Len(); i++ {
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
index 20a41d4..7161835 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
@@ -4,6 +4,7 @@ package rest
import (
"bytes"
"encoding/base64"
+ "encoding/json"
"fmt"
"io"
"net/http"
@@ -82,8 +83,12 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo
if name == "" {
name = field.Name
}
- if m.Kind() == reflect.Ptr {
+ if kind := m.Kind(); kind == reflect.Ptr {
m = m.Elem()
+ } else if kind == reflect.Interface {
+ if !m.Elem().IsValid() {
+ continue
+ }
}
if !m.IsValid() {
continue
@@ -95,16 +100,16 @@ func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bo
var err error
switch field.Tag.Get("location") {
case "headers": // header maps
- err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName"))
+ err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag)
case "header":
- err = buildHeader(&r.HTTPRequest.Header, m, name)
+ err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag)
case "uri":
- err = buildURI(r.HTTPRequest.URL, m, name)
+ err = buildURI(r.HTTPRequest.URL, m, name, field.Tag)
case "querystring":
- err = buildQueryString(query, m, name)
+ err = buildQueryString(query, m, name, field.Tag)
default:
if buildGETQuery {
- err = buildQueryString(query, m, name)
+ err = buildQueryString(query, m, name, field.Tag)
}
}
r.Error = err
@@ -145,8 +150,8 @@ func buildBody(r *request.Request, v reflect.Value) {
}
}
-func buildHeader(header *http.Header, v reflect.Value, name string) error {
- str, err := convertType(v)
+func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error {
+ str, err := convertType(v, tag)
if err == errValueNotSet {
return nil
} else if err != nil {
@@ -158,9 +163,10 @@ func buildHeader(header *http.Header, v reflect.Value, name string) error {
return nil
}
-func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error {
+func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error {
+ prefix := tag.Get("locationName")
for _, key := range v.MapKeys() {
- str, err := convertType(v.MapIndex(key))
+ str, err := convertType(v.MapIndex(key), tag)
if err == errValueNotSet {
continue
} else if err != nil {
@@ -173,8 +179,8 @@ func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error {
return nil
}
-func buildURI(u *url.URL, v reflect.Value, name string) error {
- value, err := convertType(v)
+func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error {
+ value, err := convertType(v, tag)
if err == errValueNotSet {
return nil
} else if err != nil {
@@ -190,7 +196,7 @@ func buildURI(u *url.URL, v reflect.Value, name string) error {
return nil
}
-func buildQueryString(query url.Values, v reflect.Value, name string) error {
+func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error {
switch value := v.Interface().(type) {
case []*string:
for _, item := range value {
@@ -207,7 +213,7 @@ func buildQueryString(query url.Values, v reflect.Value, name string) error {
}
}
default:
- str, err := convertType(v)
+ str, err := convertType(v, tag)
if err == errValueNotSet {
return nil
} else if err != nil {
@@ -246,7 +252,7 @@ func EscapePath(path string, encodeSep bool) string {
return buf.String()
}
-func convertType(v reflect.Value) (string, error) {
+func convertType(v reflect.Value, tag reflect.StructTag) (string, error) {
v = reflect.Indirect(v)
if !v.IsValid() {
return "", errValueNotSet
@@ -266,6 +272,16 @@ func convertType(v reflect.Value) (string, error) {
str = strconv.FormatFloat(value, 'f', -1, 64)
case time.Time:
str = value.UTC().Format(RFC822)
+ case aws.JSONValue:
+ b, err := json.Marshal(value)
+ if err != nil {
+ return "", err
+ }
+ if tag.Get("location") == "header" {
+ str = base64.StdEncoding.EncodeToString(b)
+ } else {
+ str = string(b)
+ }
default:
err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
return "", err
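A minimal sketch of the encoding the new aws.JSONValue case performs for header-bound parameters, using only encoding/json and encoding/base64: the value is JSON marshaled and then base64 encoded before being set on the header.

    val := aws.JSONValue{"sample": "metadata"} // hypothetical JSON value
    b, err := json.Marshal(val)
    if err != nil {
        // handle the marshal error
    }
    headerValue := base64.StdEncoding.EncodeToString(b)
    // headerValue is what the REST builder would place in the header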
diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
index 9c00921..7a779ee 100644
--- a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
+++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
@@ -3,6 +3,7 @@ package rest
import (
"bytes"
"encoding/base64"
+ "encoding/json"
"fmt"
"io"
"io/ioutil"
@@ -12,6 +13,7 @@ import (
"strings"
"time"
+ "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
@@ -111,7 +113,7 @@ func unmarshalLocationElements(r *request.Request, v reflect.Value) {
case "statusCode":
unmarshalStatusCode(m, r.HTTPResponse.StatusCode)
case "header":
- err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name))
+ err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag)
if err != nil {
r.Error = awserr.New("SerializationError", "failed to decode REST response", err)
break
@@ -158,8 +160,13 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err
return nil
}
-func unmarshalHeader(v reflect.Value, header string) error {
- if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
+func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error {
+ isJSONValue := tag.Get("type") == "jsonvalue"
+ if isJSONValue {
+ if len(header) == 0 {
+ return nil
+ }
+ } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) {
return nil
}
@@ -196,6 +203,22 @@ func unmarshalHeader(v reflect.Value, header string) error {
return err
}
v.Set(reflect.ValueOf(&t))
+ case aws.JSONValue:
+ b := []byte(header)
+ var err error
+ if tag.Get("location") == "header" {
+ b, err = base64.StdEncoding.DecodeString(header)
+ if err != nil {
+ return err
+ }
+ }
+
+ m := aws.JSONValue{}
+ err = json.Unmarshal(b, &m)
+ if err != nil {
+ return err
+ }
+ v.Set(reflect.ValueOf(m))
default:
err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
return err
diff --git a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go
deleted file mode 100644
index b51e944..0000000
--- a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package waiter
-
-import (
- "fmt"
- "reflect"
- "time"
-
- "github.com/aws/aws-sdk-go/aws"
- "github.com/aws/aws-sdk-go/aws/awserr"
- "github.com/aws/aws-sdk-go/aws/awsutil"
- "github.com/aws/aws-sdk-go/aws/request"
-)
-
-// A Config provides a collection of configuration values to setup a generated
-// waiter code with.
-type Config struct {
- Name string
- Delay int
- MaxAttempts int
- Operation string
- Acceptors []WaitAcceptor
-}
-
-// A WaitAcceptor provides the information needed to wait for an API operation
-// to complete.
-type WaitAcceptor struct {
- Expected interface{}
- Matcher string
- State string
- Argument string
-}
-
-// A Waiter provides waiting for an operation to complete.
-type Waiter struct {
- Config
- Client interface{}
- Input interface{}
-}
-
-// Wait waits for an operation to complete, expire max attempts, or fail. Error
-// is returned if the operation fails.
-func (w *Waiter) Wait() error {
- client := reflect.ValueOf(w.Client)
- in := reflect.ValueOf(w.Input)
- method := client.MethodByName(w.Config.Operation + "Request")
-
- for i := 0; i < w.MaxAttempts; i++ {
- res := method.Call([]reflect.Value{in})
- req := res[0].Interface().(*request.Request)
- req.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Waiter"))
-
- err := req.Send()
- for _, a := range w.Acceptors {
- result := false
- var vals []interface{}
- switch a.Matcher {
- case "pathAll", "path":
- // Require all matches to be equal for result to match
- vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
- if len(vals) == 0 {
- break
- }
- result = true
- for _, val := range vals {
- if !awsutil.DeepEqual(val, a.Expected) {
- result = false
- break
- }
- }
- case "pathAny":
- // Only a single match needs to equal for the result to match
- vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
- for _, val := range vals {
- if awsutil.DeepEqual(val, a.Expected) {
- result = true
- break
- }
- }
- case "status":
- s := a.Expected.(int)
- result = s == req.HTTPResponse.StatusCode
- case "error":
- if aerr, ok := err.(awserr.Error); ok {
- result = aerr.Code() == a.Expected.(string)
- }
- case "pathList":
- // ignored matcher
- default:
- logf(client, "WARNING: Waiter for %s encountered unexpected matcher: %s",
- w.Config.Operation, a.Matcher)
- }
-
- if !result {
- // If there was no matching result found there is nothing more to do
- // for this response, retry the request.
- continue
- }
-
- switch a.State {
- case "success":
- // waiter completed
- return nil
- case "failure":
- // Waiter failure state triggered
- return awserr.New("ResourceNotReady",
- fmt.Sprintf("failed waiting for successful resource state"), err)
- case "retry":
- // clear the error and retry the operation
- err = nil
- default:
- logf(client, "WARNING: Waiter for %s encountered unexpected state: %s",
- w.Config.Operation, a.State)
- }
- }
- if err != nil {
- return err
- }
-
- time.Sleep(time.Second * time.Duration(w.Delay))
- }
-
- return awserr.New("ResourceNotReady",
- fmt.Sprintf("exceeded %d wait attempts", w.MaxAttempts), nil)
-}
-
-func logf(client reflect.Value, msg string, args ...interface{}) {
- cfgVal := client.FieldByName("Config")
- if !cfgVal.IsValid() {
- return
- }
- if cfg, ok := cfgVal.Interface().(*aws.Config); ok && cfg.Logger != nil {
- cfg.Logger.Log(fmt.Sprintf(msg, args...))
- }
-}
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
index 9b205f3..3f0fc2f 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go
@@ -1,4 +1,4 @@
-// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package s3 provides a client for Amazon Simple Storage Service.
package s3
@@ -8,6 +8,7 @@ import (
"io"
"time"
+ "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol"
@@ -79,8 +80,23 @@ func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload
func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) {
req, out := c.AbortMultipartUploadRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AbortMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) {
+ req, out := c.AbortMultipartUploadRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
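A minimal usage sketch of the new WithContext variants, assuming an S3 client value svc with placeholder bucket, key, and upload ID values; the standard library context package supplies the cancellation.

    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    defer cancel()

    _, err := svc.AbortMultipartUploadWithContext(ctx, &s3.AbortMultipartUploadInput{
        Bucket:   aws.String("my-bucket"),    // hypothetical bucket
        Key:      aws.String("my-key"),       // hypothetical key
        UploadId: aws.String("my-upload-id"), // hypothetical upload ID
    })
    if err != nil {
        // the request failed, timed out, or was canceled
    }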
const opCompleteMultipartUpload = "CompleteMultipartUpload"
@@ -139,8 +155,23 @@ func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput)
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload
func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) {
req, out := c.CompleteMultipartUploadRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CompleteMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) {
+ req, out := c.CompleteMultipartUploadRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opCopyObject = "CopyObject"
@@ -205,8 +236,23 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject
func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) {
req, out := c.CopyObjectRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// CopyObjectWithContext is the same as CopyObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CopyObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) {
+ req, out := c.CopyObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opCreateBucket = "CreateBucket"
@@ -273,8 +319,23 @@ func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket
func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) {
req, out := c.CreateBucketRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// CreateBucketWithContext is the same as CreateBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) {
+ req, out := c.CreateBucketRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opCreateMultipartUpload = "CreateMultipartUpload"
@@ -339,8 +400,23 @@ func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (re
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload
func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) {
req, out := c.CreateMultipartUploadRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of
+// the ability to pass a context and additional request options.
+//
+// See CreateMultipartUpload for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) {
+ req, out := c.CreateMultipartUploadRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opDeleteBucket = "DeleteBucket"
@@ -402,8 +478,23 @@ func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket
func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) {
req, out := c.DeleteBucketRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// DeleteBucketWithContext is the same as DeleteBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) {
+ req, out := c.DeleteBucketRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration"
@@ -465,8 +556,23 @@ func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyt
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration
func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) {
req, out := c.DeleteBucketAnalyticsConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.DeleteBucketAnalyticsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opDeleteBucketCors = "DeleteBucketCors"
@@ -527,8 +633,23 @@ func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors
func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) {
req, out := c.DeleteBucketCorsRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) {
+ req, out := c.DeleteBucketCorsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration"
@@ -590,8 +711,23 @@ func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInvent
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration
func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) {
req, out := c.DeleteBucketInventoryConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketInventoryConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) {
+ req, out := c.DeleteBucketInventoryConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opDeleteBucketLifecycle = "DeleteBucketLifecycle"
@@ -652,8 +788,23 @@ func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (re
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle
func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) {
req, out := c.DeleteBucketLifecycleRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) {
+ req, out := c.DeleteBucketLifecycleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration"
@@ -715,8 +866,23 @@ func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsC
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration
func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) {
req, out := c.DeleteBucketMetricsConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) {
+ req, out := c.DeleteBucketMetricsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opDeleteBucketPolicy = "DeleteBucketPolicy"
@@ -777,8 +943,23 @@ func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *req
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy
func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) {
req, out := c.DeleteBucketPolicyRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) {
+ req, out := c.DeleteBucketPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opDeleteBucketReplication = "DeleteBucketReplication"
@@ -839,8 +1020,23 @@ func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput)
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication
func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) {
req, out := c.DeleteBucketReplicationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) {
+ req, out := c.DeleteBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opDeleteBucketTagging = "DeleteBucketTagging"
@@ -901,8 +1097,23 @@ func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *r
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging
func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) {
req, out := c.DeleteBucketTaggingRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) {
+ req, out := c.DeleteBucketTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opDeleteBucketWebsite = "DeleteBucketWebsite"
@@ -963,8 +1174,23 @@ func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *r
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite
func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) {
req, out := c.DeleteBucketWebsiteRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) {
+ req, out := c.DeleteBucketWebsiteRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opDeleteObject = "DeleteObject"
@@ -1025,8 +1251,23 @@ func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject
func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) {
req, out := c.DeleteObjectRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// DeleteObjectWithContext is the same as DeleteObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) {
+ req, out := c.DeleteObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opDeleteObjectTagging = "DeleteObjectTagging"
@@ -1085,8 +1326,23 @@ func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *r
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging
func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) {
req, out := c.DeleteObjectTaggingRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) {
+ req, out := c.DeleteObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opDeleteObjects = "DeleteObjects"
@@ -1146,8 +1402,23 @@ func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Reque
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects
func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) {
req, out := c.DeleteObjectsRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// DeleteObjectsWithContext is the same as DeleteObjects with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DeleteObjects for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) {
+ req, out := c.DeleteObjectsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
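
The batch-delete operation follows the same pattern, so the only operation-specific part is the input shape. A small helper sketch with placeholder names; the package and function names are invented for illustration:

    package example

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    // deleteKeys removes the given keys from a bucket in one batch call,
    // honouring whatever deadline or cancellation the caller attached to ctx.
    func deleteKeys(ctx aws.Context, svc *s3.S3, bucket string, keys []string) error {
    	objects := make([]*s3.ObjectIdentifier, 0, len(keys))
    	for _, k := range keys {
    		objects = append(objects, &s3.ObjectIdentifier{Key: aws.String(k)})
    	}

    	out, err := svc.DeleteObjectsWithContext(ctx, &s3.DeleteObjectsInput{
    		Bucket: aws.String(bucket),
    		Delete: &s3.Delete{Objects: objects},
    	})
    	if err != nil {
    		return err
    	}
    	// Per-key failures come back in the output rather than as an error.
    	for _, e := range out.Errors {
    		fmt.Printf("could not delete %s: %s\n", aws.StringValue(e.Key), aws.StringValue(e.Message))
    	}
    	return nil
    }
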
const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration"
@@ -1206,8 +1477,23 @@ func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateC
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration
func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) {
req, out := c.GetBucketAccelerateConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAccelerateConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) {
+ req, out := c.GetBucketAccelerateConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketAcl = "GetBucketAcl"
@@ -1266,8 +1552,23 @@ func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl
func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) {
req, out := c.GetBucketAclRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketAclWithContext is the same as GetBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) {
+ req, out := c.GetBucketAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration"
@@ -1327,8 +1628,23 @@ func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsCon
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration
func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) {
req, out := c.GetBucketAnalyticsConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketAnalyticsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.GetBucketAnalyticsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketCors = "GetBucketCors"
@@ -1387,8 +1703,23 @@ func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Reque
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors
func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) {
req, out := c.GetBucketCorsRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketCorsWithContext is the same as GetBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) {
+ req, out := c.GetBucketCorsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration"
@@ -1448,8 +1779,23 @@ func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryCon
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration
func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) {
req, out := c.GetBucketInventoryConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketInventoryConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) {
+ req, out := c.GetBucketInventoryConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketLifecycle = "GetBucketLifecycle"
@@ -1511,8 +1857,23 @@ func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *req
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle
func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) {
req, out := c.GetBucketLifecycleRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) {
+ req, out := c.GetBucketLifecycleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration"
@@ -1571,8 +1932,23 @@ func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleCon
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration
func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) {
req, out := c.GetBucketLifecycleConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) {
+ req, out := c.GetBucketLifecycleConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketLocation = "GetBucketLocation"
@@ -1631,8 +2007,23 @@ func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *reque
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation
func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) {
req, out := c.GetBucketLocationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLocation for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) {
+ req, out := c.GetBucketLocationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketLogging = "GetBucketLogging"
@@ -1692,8 +2083,23 @@ func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging
func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) {
req, out := c.GetBucketLoggingRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) {
+ req, out := c.GetBucketLoggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration"
@@ -1753,8 +2159,23 @@ func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigu
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration
func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) {
req, out := c.GetBucketMetricsConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) {
+ req, out := c.GetBucketMetricsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketNotification = "GetBucketNotification"
@@ -1816,8 +2237,23 @@ func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurat
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification
func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) {
req, out := c.GetBucketNotificationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketNotification for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) {
+ req, out := c.GetBucketNotificationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration"
@@ -1876,8 +2312,23 @@ func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificat
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration
func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) {
req, out := c.GetBucketNotificationConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketNotificationConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) {
+ req, out := c.GetBucketNotificationConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketPolicy = "GetBucketPolicy"
@@ -1936,8 +2387,23 @@ func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.R
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy
func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) {
req, out := c.GetBucketPolicyRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) {
+ req, out := c.GetBucketPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketReplication = "GetBucketReplication"
@@ -1996,8 +2462,23 @@ func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication
func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) {
req, out := c.GetBucketReplicationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) {
+ req, out := c.GetBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketRequestPayment = "GetBucketRequestPayment"
@@ -2056,8 +2537,23 @@ func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput)
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment
func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) {
req, out := c.GetBucketRequestPaymentRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketRequestPayment for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) {
+ req, out := c.GetBucketRequestPaymentRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketTagging = "GetBucketTagging"
@@ -2116,8 +2612,23 @@ func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging
func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) {
req, out := c.GetBucketTaggingRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) {
+ req, out := c.GetBucketTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketVersioning = "GetBucketVersioning"
@@ -2176,8 +2687,23 @@ func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *r
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning
func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) {
req, out := c.GetBucketVersioningRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketVersioning for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) {
+ req, out := c.GetBucketVersioningRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetBucketWebsite = "GetBucketWebsite"
@@ -2236,8 +2762,23 @@ func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite
func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) {
req, out := c.GetBucketWebsiteRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) {
+ req, out := c.GetBucketWebsiteRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetObject = "GetObject"
@@ -2301,8 +2842,23 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject
func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) {
req, out := c.GetObjectRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetObjectWithContext is the same as GetObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) {
+ req, out := c.GetObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
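
For download-style calls the context is attached to the underlying HTTP request, while the response body is still read and closed by the caller as before. A sketch of streaming an object to a local file, again with placeholder names:

    package example

    import (
    	"io"
    	"os"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    // downloadObject streams s3://bucket/key into a local file. If ctx is
    // cancelled while the transfer is in flight, the request is torn down and
    // the copy returns an error.
    func downloadObject(ctx aws.Context, svc *s3.S3, bucket, key, path string) error {
    	out, err := svc.GetObjectWithContext(ctx, &s3.GetObjectInput{
    		Bucket: aws.String(bucket),
    		Key:    aws.String(key),
    	})
    	if err != nil {
    		return err
    	}
    	defer out.Body.Close()

    	f, err := os.Create(path)
    	if err != nil {
    		return err
    	}
    	defer f.Close()

    	_, err = io.Copy(f, out.Body)
    	return err
    }
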
const opGetObjectAcl = "GetObjectAcl"
@@ -2366,8 +2922,23 @@ func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl
func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) {
req, out := c.GetObjectAclRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetObjectAclWithContext is the same as GetObjectAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) {
+ req, out := c.GetObjectAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetObjectTagging = "GetObjectTagging"
@@ -2426,8 +2997,23 @@ func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging
func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) {
req, out := c.GetObjectTaggingRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) {
+ req, out := c.GetObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetObjectTorrent = "GetObjectTorrent"
@@ -2486,8 +3072,23 @@ func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent
func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) {
req, out := c.GetObjectTorrentRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetObjectTorrent for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) {
+ req, out := c.GetObjectTorrentRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opHeadBucket = "HeadBucket"
@@ -2554,8 +3155,23 @@ func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, ou
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket
func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) {
req, out := c.HeadBucketRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// HeadBucketWithContext is the same as HeadBucket with the addition of
+// the ability to pass a context and additional request options.
+//
+// See HeadBucket for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) {
+ req, out := c.HeadBucketRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opHeadObject = "HeadObject"
@@ -2621,8 +3237,23 @@ func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, ou
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject
func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) {
req, out := c.HeadObjectRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// HeadObjectWithContext is the same as HeadObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See HeadObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) {
+ req, out := c.HeadObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations"
@@ -2681,8 +3312,23 @@ func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalytics
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations
func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) {
req, out := c.ListBucketAnalyticsConfigurationsRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketAnalyticsConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) {
+ req, out := c.ListBucketAnalyticsConfigurationsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations"
@@ -2741,8 +3387,23 @@ func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventory
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations
func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) {
req, out := c.ListBucketInventoryConfigurationsRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketInventoryConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) {
+ req, out := c.ListBucketInventoryConfigurationsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations"
@@ -2801,8 +3462,23 @@ func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConf
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations
func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) {
req, out := c.ListBucketMetricsConfigurationsRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBucketMetricsConfigurations for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) {
+ req, out := c.ListBucketMetricsConfigurationsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opListBuckets = "ListBuckets"
@@ -2861,8 +3537,23 @@ func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request,
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets
func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) {
req, out := c.ListBucketsRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// ListBucketsWithContext is the same as ListBuckets with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListBuckets for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) {
+ req, out := c.ListBucketsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opListMultipartUploads = "ListMultipartUploads"
@@ -2927,8 +3618,23 @@ func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads
func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) {
req, out := c.ListMultipartUploadsRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListMultipartUploads for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) {
+ req, out := c.ListMultipartUploadsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation,
@@ -2948,12 +3654,37 @@ func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultip
// return pageNum <= 3
// })
//
-func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error {
- page, _ := c.ListMultipartUploadsRequest(input)
- page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
- return page.EachPage(func(p interface{}, lastPage bool) bool {
- return fn(p.(*ListMultipartUploadsOutput), lastPage)
- })
+func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error {
+ return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListMultipartUploadsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListMultipartUploadsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage())
+ }
+ return p.Err()
}
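
The paginator helpers are rebuilt on the same mechanics: the plain Pages method now delegates to the WithContext form with aws.BackgroundContext(), and the callback stops iteration by returning false. A sketch of the context-aware variant, with a placeholder bucket and an invented helper name:

    package example

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/s3"
    )

    // listSomeUploads prints in-progress multipart uploads, stopping after the
    // first three pages or when ctx is cancelled, whichever comes first.
    func listSomeUploads(ctx aws.Context, svc *s3.S3, bucket string) error {
    	pageNum := 0
    	return svc.ListMultipartUploadsPagesWithContext(ctx, &s3.ListMultipartUploadsInput{
    		Bucket: aws.String(bucket),
    	}, func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
    		pageNum++
    		for _, u := range page.Uploads {
    			fmt.Println(aws.StringValue(u.Key), aws.StringValue(u.UploadId))
    		}
    		return pageNum < 3 // returning false ends the iteration
    	})
    }
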
const opListObjectVersions = "ListObjectVersions"
@@ -3018,8 +3749,23 @@ func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *req
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions
func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) {
req, out := c.ListObjectVersionsRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjectVersions for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) {
+ req, out := c.ListObjectVersionsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation,
@@ -3039,12 +3785,37 @@ func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVers
// return pageNum <= 3
// })
//
-func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error {
- page, _ := c.ListObjectVersionsRequest(input)
- page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
- return page.EachPage(func(p interface{}, lastPage bool) bool {
- return fn(p.(*ListObjectVersionsOutput), lastPage)
- })
+func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error {
+ return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectVersionsPagesWithContext is the same as ListObjectVersionsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListObjectVersionsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListObjectVersionsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage())
+ }
+ return p.Err()
}
const opListObjects = "ListObjects"
@@ -3116,8 +3887,23 @@ func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request,
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects
func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
req, out := c.ListObjectsRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// ListObjectsWithContext is the same as ListObjects with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjects for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) {
+ req, out := c.ListObjectsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
// ListObjectsPages iterates over the pages of a ListObjects operation,
@@ -3137,12 +3923,37 @@ func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) {
// return pageNum <= 3
// })
//
-func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error {
- page, _ := c.ListObjectsRequest(input)
- page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
- return page.EachPage(func(p interface{}, lastPage bool) bool {
- return fn(p.(*ListObjectsOutput), lastPage)
- })
+func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error {
+ return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectsPagesWithContext is the same as ListObjectsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListObjectsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListObjectsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage())
+ }
+ return p.Err()
}
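The Pages helpers now delegate to request.Pagination: NewRequest builds each page's request from a copy of the input, and iteration stops when the callback returns false or no next page remains. A sketch of paginating a listing under these semantics, reusing the svc and ctx values (and imports) from the sketch above; the bucket name is again a placeholder:

// listFirstPages prints the keys from at most three pages of a bucket listing
// and then stops by returning false from the callback.
func listFirstPages(ctx aws.Context, svc *s3.S3) error {
	pageNum := 0
	return svc.ListObjectsPagesWithContext(ctx,
		&s3.ListObjectsInput{Bucket: aws.String("my-bucket")},
		func(page *s3.ListObjectsOutput, lastPage bool) bool {
			pageNum++
			for _, obj := range page.Contents {
				fmt.Println(aws.StringValue(obj.Key))
			}
			// Returning false ends the pagination early; lastPage reports
			// whether this was the final page anyway.
			return pageNum < 3
		})
}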
const opListObjectsV2 = "ListObjectsV2"
@@ -3215,8 +4026,23 @@ func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Reque
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2
func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) {
req, out := c.ListObjectsV2Request(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListObjectsV2 for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) {
+ req, out := c.ListObjectsV2Request(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation,
@@ -3236,12 +4062,37 @@ func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, err
// return pageNum <= 3
// })
//
-func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(p *ListObjectsV2Output, lastPage bool) (shouldContinue bool)) error {
- page, _ := c.ListObjectsV2Request(input)
- page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
- return page.EachPage(func(p interface{}, lastPage bool) bool {
- return fn(p.(*ListObjectsV2Output), lastPage)
- })
+func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error {
+ return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListObjectsV2PagesWithContext is the same as ListObjectsV2Pages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListObjectsV2Input
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListObjectsV2Request(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage())
+ }
+ return p.Err()
}
const opListParts = "ListParts"
@@ -3306,8 +4157,23 @@ func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, outp
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts
func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
req, out := c.ListPartsRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// ListPartsWithContext is the same as ListParts with the addition of
+// the ability to pass a context and additional request options.
+//
+// See ListParts for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) {
+ req, out := c.ListPartsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
// ListPartsPages iterates over the pages of a ListParts operation,
@@ -3327,12 +4193,37 @@ func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) {
// return pageNum <= 3
// })
//
-func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error {
- page, _ := c.ListPartsRequest(input)
- page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator"))
- return page.EachPage(func(p interface{}, lastPage bool) bool {
- return fn(p.(*ListPartsOutput), lastPage)
- })
+func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error {
+ return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn)
+}
+
+// ListPartsPagesWithContext is the same as ListPartsPages except
+// it takes a Context and allows setting request options on the pages.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error {
+ p := request.Pagination{
+ NewRequest: func() (*request.Request, error) {
+ var inCpy *ListPartsInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.ListPartsRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
+ }
+
+ cont := true
+ for p.Next() && cont {
+ cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage())
+ }
+ return p.Err()
}
const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration"
@@ -3393,8 +4284,23 @@ func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateC
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration
func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) {
req, out := c.PutBucketAccelerateConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAccelerateConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) {
+ req, out := c.PutBucketAccelerateConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketAcl = "PutBucketAcl"
@@ -3455,8 +4361,23 @@ func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl
func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) {
req, out := c.PutBucketAclRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketAclWithContext is the same as PutBucketAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) {
+ req, out := c.PutBucketAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration"
@@ -3518,8 +4439,23 @@ func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsCon
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration
func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) {
req, out := c.PutBucketAnalyticsConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketAnalyticsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) {
+ req, out := c.PutBucketAnalyticsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketCors = "PutBucketCors"
@@ -3580,8 +4516,23 @@ func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Reque
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors
func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) {
req, out := c.PutBucketCorsRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketCorsWithContext is the same as PutBucketCors with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketCors for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) {
+ req, out := c.PutBucketCorsRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration"
@@ -3643,8 +4594,23 @@ func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryCon
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration
func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) {
req, out := c.PutBucketInventoryConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketInventoryConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) {
+ req, out := c.PutBucketInventoryConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketLifecycle = "PutBucketLifecycle"
@@ -3708,8 +4674,23 @@ func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *req
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle
func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) {
req, out := c.PutBucketLifecycleRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLifecycle for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) {
+ req, out := c.PutBucketLifecycleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration"
@@ -3771,8 +4752,23 @@ func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleCon
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration
func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) {
req, out := c.PutBucketLifecycleConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLifecycleConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) {
+ req, out := c.PutBucketLifecycleConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketLogging = "PutBucketLogging"
@@ -3835,8 +4831,23 @@ func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging
func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) {
req, out := c.PutBucketLoggingRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketLogging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) {
+ req, out := c.PutBucketLoggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration"
@@ -3898,8 +4909,23 @@ func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigu
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration
func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) {
req, out := c.PutBucketMetricsConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketMetricsConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) {
+ req, out := c.PutBucketMetricsConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketNotification = "PutBucketNotification"
@@ -3963,8 +4989,23 @@ func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (re
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification
func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) {
req, out := c.PutBucketNotificationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketNotification for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) {
+ req, out := c.PutBucketNotificationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration"
@@ -4025,8 +5066,23 @@ func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificat
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration
func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) {
req, out := c.PutBucketNotificationConfigurationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketNotificationConfiguration for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) {
+ req, out := c.PutBucketNotificationConfigurationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketPolicy = "PutBucketPolicy"
@@ -4088,8 +5144,23 @@ func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.R
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy
func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) {
req, out := c.PutBucketPolicyRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketPolicy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) {
+ req, out := c.PutBucketPolicyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketReplication = "PutBucketReplication"
@@ -4151,8 +5222,23 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication
func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) {
req, out := c.PutBucketReplicationRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketReplication for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) {
+ req, out := c.PutBucketReplicationRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketRequestPayment = "PutBucketRequestPayment"
@@ -4217,8 +5303,23 @@ func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput)
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment
func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) {
req, out := c.PutBucketRequestPaymentRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketRequestPayment for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) {
+ req, out := c.PutBucketRequestPaymentRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketTagging = "PutBucketTagging"
@@ -4279,8 +5380,23 @@ func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging
func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) {
req, out := c.PutBucketTaggingRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) {
+ req, out := c.PutBucketTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketVersioning = "PutBucketVersioning"
@@ -4342,8 +5458,23 @@ func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *r
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning
func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) {
req, out := c.PutBucketVersioningRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketVersioning for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) {
+ req, out := c.PutBucketVersioningRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutBucketWebsite = "PutBucketWebsite"
@@ -4404,8 +5535,23 @@ func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite
func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) {
req, out := c.PutBucketWebsiteRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutBucketWebsite for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) {
+ req, out := c.PutBucketWebsiteRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutObject = "PutObject"
@@ -4464,8 +5610,23 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject
func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) {
req, out := c.PutObjectRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutObjectWithContext is the same as PutObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) {
+ req, out := c.PutObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
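Each WithContext method also accepts variadic request.Option values, which are applied to the request before it is sent. Because request.Option is a function over *request.Request, a plain closure works; this sketch additionally assumes the bytes and aws/request packages are imported, and the header, bucket, and key names are purely illustrative:

// putWithHeader uploads a small object and uses a request.Option closure to
// stamp a custom header on the underlying HTTP request before it is sent.
func putWithHeader(ctx aws.Context, svc *s3.S3) error {
	_, err := svc.PutObjectWithContext(ctx, &s3.PutObjectInput{
		Bucket: aws.String("my-bucket"),
		Key:    aws.String("example.txt"),
		Body:   bytes.NewReader([]byte("hello")),
	}, func(r *request.Request) {
		r.HTTPRequest.Header.Set("X-Example-Trace-Id", "demo")
	})
	return err
}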
const opPutObjectAcl = "PutObjectAcl"
@@ -4530,8 +5691,23 @@ func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl
func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) {
req, out := c.PutObjectAclRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutObjectAclWithContext is the same as PutObjectAcl with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectAcl for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) {
+ req, out := c.PutObjectAclRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opPutObjectTagging = "PutObjectTagging"
@@ -4590,8 +5766,23 @@ func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging
func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) {
req, out := c.PutObjectTaggingRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of
+// the ability to pass a context and additional request options.
+//
+// See PutObjectTagging for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) {
+ req, out := c.PutObjectTaggingRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opRestoreObject = "RestoreObject"
@@ -4655,8 +5846,23 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject
func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) {
req, out := c.RestoreObjectRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// RestoreObjectWithContext is the same as RestoreObject with the addition of
+// the ability to pass a context and additional request options.
+//
+// See RestoreObject for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) {
+ req, out := c.RestoreObjectRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opUploadPart = "UploadPart"
@@ -4721,8 +5927,23 @@ func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, ou
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart
func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) {
req, out := c.UploadPartRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// UploadPartWithContext is the same as UploadPart with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPart for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) {
+ req, out := c.UploadPartRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opUploadPartCopy = "UploadPartCopy"
@@ -4781,8 +6002,23 @@ func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Req
// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy
func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) {
req, out := c.UploadPartCopyRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of
+// the ability to pass a context and additional request options.
+//
+// See UploadPartCopy for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) {
+ req, out := c.UploadPartCopyRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
// Specifies the days since the initiation of an Incomplete Multipart Upload
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
index 13ebbda..931cb17 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go
@@ -1,4 +1,4 @@
-// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
index 5e6f229..3fb5b3b 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go
@@ -1,4 +1,4 @@
-// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package s3
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
index ed91c58..bcca862 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go
@@ -23,17 +23,22 @@ func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
defer io.Copy(ioutil.Discard, r.HTTPResponse.Body)
+ hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2")
+
// Bucket exists in a different region, and request needs
// to be made to the correct region.
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
- r.Error = awserr.NewRequestFailure(
- awserr.New("BucketRegionError",
- fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
- aws.StringValue(r.Config.Region)),
- nil),
- r.HTTPResponse.StatusCode,
- r.RequestID,
- )
+ r.Error = requestFailure{
+ RequestFailure: awserr.NewRequestFailure(
+ awserr.New("BucketRegionError",
+ fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
+ aws.StringValue(r.Config.Region)),
+ nil),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ ),
+ hostID: hostID,
+ }
return
}
@@ -48,6 +53,7 @@ func unmarshalError(r *request.Request) {
} else {
errCode = resp.Code
errMsg = resp.Message
+ err = nil
}
// Fallback to status code converted to message if still no error code
@@ -57,9 +63,41 @@ func unmarshalError(r *request.Request) {
errMsg = statusText
}
- r.Error = awserr.NewRequestFailure(
- awserr.New(errCode, errMsg, nil),
- r.HTTPResponse.StatusCode,
- r.RequestID,
- )
+ r.Error = requestFailure{
+ RequestFailure: awserr.NewRequestFailure(
+ awserr.New(errCode, errMsg, err),
+ r.HTTPResponse.StatusCode,
+ r.RequestID,
+ ),
+ hostID: hostID,
+ }
+}
+
+// A RequestFailure provides access to the S3 Request ID and Host ID values
+// returned from API operation errors. Getting the error as a string will
+// return the formatted error with the same information as awserr.RequestFailure,
+// while also adding the HostID value from the response.
+type RequestFailure interface {
+ awserr.RequestFailure
+
+ // Host ID is the S3 Host ID needed for debugging and for contacting support
+ HostID() string
+}
+
+type requestFailure struct {
+ awserr.RequestFailure
+
+ hostID string
+}
+
+func (r requestFailure) Error() string {
+ extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s",
+ r.StatusCode(), r.RequestID(), r.hostID)
+ return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr())
+}
+func (r requestFailure) String() string {
+ return r.Error()
+}
+func (r requestFailure) HostID() string {
+ return r.hostID
}
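Because S3 operation errors are now wrapped in this requestFailure type, a caller can recover the Host ID by asserting the returned error to the exported s3.RequestFailure interface. A sketch, assuming err came from a failed S3 call and fmt is imported:

// After any S3 call fails, the Host ID can be read back for support requests.
if reqErr, ok := err.(s3.RequestFailure); ok {
	fmt.Println("code:      ", reqErr.Code())
	fmt.Println("request id:", reqErr.RequestID())
	fmt.Println("host id:   ", reqErr.HostID())
}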
diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
index 5e16be4..cccfa8c 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go
@@ -1,9 +1,12 @@
-// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package s3
import (
- "github.com/aws/aws-sdk-go/private/waiter"
+ "time"
+
+ "github.com/aws/aws-sdk-go/aws"
+ "github.com/aws/aws-sdk-go/aws/request"
)
// WaitUntilBucketExists uses the Amazon S3 API operation
@@ -11,44 +14,60 @@ import (
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
- waiterCfg := waiter.Config{
- Operation: "HeadBucket",
- Delay: 5,
+ return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists.
+// It adds support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilBucketExists",
MaxAttempts: 20,
- Acceptors: []waiter.WaitAcceptor{
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
{
- State: "success",
- Matcher: "status",
- Argument: "",
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
Expected: 200,
},
{
- State: "success",
- Matcher: "status",
- Argument: "",
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
Expected: 301,
},
{
- State: "success",
- Matcher: "status",
- Argument: "",
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
Expected: 403,
},
{
- State: "retry",
- Matcher: "status",
- Argument: "",
+ State: request.RetryWaiterState,
+ Matcher: request.StatusWaiterMatch,
Expected: 404,
},
},
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadBucketInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadBucketRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
}
+ w.ApplyOptions(opts...)
- w := waiter.Waiter{
- Client: c,
- Input: input,
- Config: waiterCfg,
- }
- return w.Wait()
+ return w.WaitWithContext(ctx)
}
// WaitUntilBucketNotExists uses the Amazon S3 API operation
@@ -56,26 +75,45 @@ func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
- waiterCfg := waiter.Config{
- Operation: "HeadBucket",
- Delay: 5,
+ return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists.
+// It adds support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilBucketNotExists",
MaxAttempts: 20,
- Acceptors: []waiter.WaitAcceptor{
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
{
- State: "success",
- Matcher: "status",
- Argument: "",
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
Expected: 404,
},
},
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadBucketInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadBucketRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
}
+ w.ApplyOptions(opts...)
- w := waiter.Waiter{
- Client: c,
- Input: input,
- Config: waiterCfg,
- }
- return w.Wait()
+ return w.WaitWithContext(ctx)
}
// WaitUntilObjectExists uses the Amazon S3 API operation
@@ -83,32 +121,50 @@ func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
- waiterCfg := waiter.Config{
- Operation: "HeadObject",
- Delay: 5,
+ return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists.
+// It adds support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilObjectExists",
MaxAttempts: 20,
- Acceptors: []waiter.WaitAcceptor{
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
{
- State: "success",
- Matcher: "status",
- Argument: "",
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
Expected: 200,
},
{
- State: "retry",
- Matcher: "status",
- Argument: "",
+ State: request.RetryWaiterState,
+ Matcher: request.StatusWaiterMatch,
Expected: 404,
},
},
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadObjectInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadObjectRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
}
+ w.ApplyOptions(opts...)
- w := waiter.Waiter{
- Client: c,
- Input: input,
- Config: waiterCfg,
- }
- return w.Wait()
+ return w.WaitWithContext(ctx)
}
// WaitUntilObjectNotExists uses the Amazon S3 API operation
@@ -116,24 +172,43 @@ func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
- waiterCfg := waiter.Config{
- Operation: "HeadObject",
- Delay: 5,
+ return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)
+}
+
+// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists.
+// It adds support for passing in a context and options to configure the
+// Waiter and the underlying request options.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error {
+ w := request.Waiter{
+ Name: "WaitUntilObjectNotExists",
MaxAttempts: 20,
- Acceptors: []waiter.WaitAcceptor{
+ Delay: request.ConstantWaiterDelay(5 * time.Second),
+ Acceptors: []request.WaiterAcceptor{
{
- State: "success",
- Matcher: "status",
- Argument: "",
+ State: request.SuccessWaiterState,
+ Matcher: request.StatusWaiterMatch,
Expected: 404,
},
},
+ Logger: c.Config.Logger,
+ NewRequest: func(opts []request.Option) (*request.Request, error) {
+ var inCpy *HeadObjectInput
+ if input != nil {
+ tmp := *input
+ inCpy = &tmp
+ }
+ req, _ := c.HeadObjectRequest(inCpy)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return req, nil
+ },
}
+ w.ApplyOptions(opts...)
- w := waiter.Waiter{
- Client: c,
- Input: input,
- Config: waiterCfg,
- }
- return w.Wait()
+ return w.WaitWithContext(ctx)
}
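The rewritten waiters poll through request.Waiter, so a wait can be cancelled via the context and tuned with request.WaiterOption values. A sketch using the WithWaiterMaxAttempts and WithWaiterDelay options from the updated request package (assumed available in this dependency revision), with placeholder bucket and key names:

// waitForObject polls HeadObject every 2 seconds for at most 15 attempts,
// or until ctx is cancelled.
func waitForObject(ctx aws.Context, svc *s3.S3) error {
	return svc.WaitUntilObjectExistsWithContext(ctx,
		&s3.HeadObjectInput{
			Bucket: aws.String("my-bucket"),
			Key:    aws.String("example.txt"),
		},
		request.WithWaiterMaxAttempts(15),
		request.WithWaiterDelay(request.ConstantWaiterDelay(2*time.Second)),
	)
}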
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
index ad42b4c..19dd0bf 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go
@@ -1,4 +1,4 @@
-// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package sts provides a client for AWS Security Token Service.
package sts
@@ -6,6 +6,7 @@ package sts
import (
"time"
+ "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request"
)
@@ -172,8 +173,23 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
req, out := c.AssumeRoleRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// AssumeRoleWithContext is the same as AssumeRole with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRole for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) {
+ req, out := c.AssumeRoleRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
@@ -331,8 +347,23 @@ func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *re
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML
func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) {
req, out := c.AssumeRoleWithSAMLRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithSAML for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) {
+ req, out := c.AssumeRoleWithSAMLRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
@@ -519,8 +550,23 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity
func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) {
req, out := c.AssumeRoleWithWebIdentityRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See AssumeRoleWithWebIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) {
+ req, out := c.AssumeRoleWithWebIdentityRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
@@ -617,8 +663,23 @@ func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessag
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage
func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) {
req, out := c.DecodeAuthorizationMessageRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of
+// the ability to pass a context and additional request options.
+//
+// See DecodeAuthorizationMessage for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) {
+ req, out := c.DecodeAuthorizationMessageRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetCallerIdentity = "GetCallerIdentity"
@@ -678,8 +739,23 @@ func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *requ
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity
func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) {
req, out := c.GetCallerIdentityRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetCallerIdentity for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) {
+ req, out := c.GetCallerIdentityRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetFederationToken = "GetFederationToken"
@@ -833,8 +909,23 @@ func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *re
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) {
req, out := c.GetFederationTokenRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetFederationTokenWithContext is the same as GetFederationToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetFederationToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) {
+ req, out := c.GetFederationTokenRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
const opGetSessionToken = "GetSessionToken"
@@ -947,8 +1038,23 @@ func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
req, out := c.GetSessionTokenRequest(input)
- err := req.Send()
- return out, err
+ return out, req.Send()
+}
+
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+ req, out := c.GetSessionTokenRequest(input)
+ req.SetContext(ctx)
+ req.ApplyOptions(opts...)
+ return out, req.Send()
}
// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
index dbcd667..e24884e 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go
@@ -1,4 +1,4 @@
-// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package sts
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
index 9c4bfb8..be21838 100644
--- a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -1,4 +1,4 @@
-// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT.
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package sts
diff --git a/vendor/github.com/fsnotify/fsnotify/inotify.go b/vendor/github.com/fsnotify/fsnotify/inotify.go
index bfa9dbc..d9fd1b8 100644
--- a/vendor/github.com/fsnotify/fsnotify/inotify.go
+++ b/vendor/github.com/fsnotify/fsnotify/inotify.go
@@ -24,7 +24,6 @@ type Watcher struct {
Events chan Event
Errors chan error
mu sync.Mutex // Map access
- cv *sync.Cond // sync removing on rm_watch with IN_IGNORE
fd int
poller *fdPoller
watches map[string]*watch // Map of inotify watches (key: path)
@@ -56,7 +55,6 @@ func NewWatcher() (*Watcher, error) {
done: make(chan struct{}),
doneResp: make(chan struct{}),
}
- w.cv = sync.NewCond(&w.mu)
go w.readEvents()
return w, nil
@@ -103,21 +101,23 @@ func (w *Watcher) Add(name string) error {
var flags uint32 = agnosticEvents
w.mu.Lock()
- watchEntry, found := w.watches[name]
- w.mu.Unlock()
- if found {
- watchEntry.flags |= flags
- flags |= unix.IN_MASK_ADD
+ defer w.mu.Unlock()
+ watchEntry := w.watches[name]
+ if watchEntry != nil {
+ flags |= watchEntry.flags | unix.IN_MASK_ADD
}
wd, errno := unix.InotifyAddWatch(w.fd, name, flags)
if wd == -1 {
return errno
}
- w.mu.Lock()
- w.watches[name] = &watch{wd: uint32(wd), flags: flags}
- w.paths[wd] = name
- w.mu.Unlock()
+ if watchEntry == nil {
+ w.watches[name] = &watch{wd: uint32(wd), flags: flags}
+ w.paths[wd] = name
+ } else {
+ watchEntry.wd = uint32(wd)
+ watchEntry.flags = flags
+ }
return nil
}
@@ -135,6 +135,13 @@ func (w *Watcher) Remove(name string) error {
if !ok {
return fmt.Errorf("can't remove non-existent inotify watch for: %s", name)
}
+
+ // We successfully removed the watch if InotifyRmWatch doesn't return an
+ // error; we then need to clean up our internal state to ensure it matches
+ // inotify's kernel state.
+ delete(w.paths, int(watch.wd))
+ delete(w.watches, name)
+
// inotify_rm_watch will return EINVAL if the file has been deleted;
// the inotify will already have been removed.
// watches and paths are deleted in ignoreLinux() implicitly and asynchronously
@@ -152,13 +159,6 @@ func (w *Watcher) Remove(name string) error {
return errno
}
- // wait until ignoreLinux() deleting maps
- exists := true
- for exists {
- w.cv.Wait()
- _, exists = w.watches[name]
- }
-
return nil
}
@@ -259,8 +259,17 @@ func (w *Watcher) readEvents() {
// the "Name" field with a valid filename. We retrieve the path of the watch from
// the "paths" map.
w.mu.Lock()
- name := w.paths[int(raw.Wd)]
+ name, ok := w.paths[int(raw.Wd)]
+ // IN_DELETE_SELF occurs when the file/directory being watched is removed.
+ // This is a sign to clean up the maps, otherwise we are no longer in sync
+ // with the inotify kernel state which has already deleted the watch
+ // automatically.
+ if ok && mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF {
+ delete(w.paths, int(raw.Wd))
+ delete(w.watches, name)
+ }
w.mu.Unlock()
+
if nameLen > 0 {
// Point "bytes" at the first byte of the filename
bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))
@@ -271,7 +280,7 @@ func (w *Watcher) readEvents() {
event := newEvent(name, mask)
// Send the events that are not ignored on the events channel
- if !event.ignoreLinux(w, raw.Wd, mask) {
+ if !event.ignoreLinux(mask) {
select {
case w.Events <- event:
case <-w.done:
@@ -288,15 +297,9 @@ func (w *Watcher) readEvents() {
// Certain types of events can be "ignored" and not sent over the Events
// channel. Such as events marked ignore by the kernel, or MODIFY events
// against files that do not exist.
-func (e *Event) ignoreLinux(w *Watcher, wd int32, mask uint32) bool {
+func (e *Event) ignoreLinux(mask uint32) bool {
// Ignore anything the inotify API says to ignore
if mask&unix.IN_IGNORED == unix.IN_IGNORED {
- w.mu.Lock()
- defer w.mu.Unlock()
- name := w.paths[int(wd)]
- delete(w.paths, int(wd))
- delete(w.watches, name)
- w.cv.Broadcast()
return true
}
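As a quick reference for the watcher changes above, a minimal, hypothetical use of the public fsnotify API; the watched path is an assumption.

```go
package main

import (
	"log"

	"github.com/fsnotify/fsnotify"
)

func main() {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	// Add registers (or updates) an inotify watch; with the change above the
	// watch maps are now maintained under a single lock acquisition.
	if err := w.Add("/tmp"); err != nil { // illustrative path
		log.Fatal(err)
	}

	for {
		select {
		case ev := <-w.Events:
			log.Println("event:", ev)
		case err := <-w.Errors:
			log.Println("error:", err)
		}
	}
}
```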
diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go
index c343ded..5211d5a 100644
--- a/vendor/github.com/go-ini/ini/ini.go
+++ b/vendor/github.com/go-ini/ini/ini.go
@@ -37,7 +37,7 @@ const (
// Maximum allowed depth when recursively substituting variable names.
_DEPTH_VALUES = 99
- _VERSION = "1.24.0"
+ _VERSION = "1.27.0"
)
// Version returns current package version literal.
@@ -173,9 +173,13 @@ type LoadOptions struct {
Insensitive bool
// IgnoreContinuation indicates whether to ignore continuation lines while parsing.
IgnoreContinuation bool
+ // IgnoreInlineComment indicates whether to ignore comments at the end of a value and treat them as part of the value.
+ IgnoreInlineComment bool
// AllowBooleanKeys indicates whether to allow boolean-type keys or treat them as keys whose value is missing.
// Keys of this type are mostly used in my.cnf.
AllowBooleanKeys bool
+ // AllowShadows indicates whether to keep track of keys with same name under same section.
+ AllowShadows bool
// Some INI formats allow group blocks that store a block of raw content that doesn't otherwise
// conform to key/value pairs. Specify the names of those blocks here.
UnparseableSections []string
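To make the new LoadOptions fields concrete, a small sketch of loading with inline comments preserved; the file content below is made up.

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

func main() {
	// With IgnoreInlineComment set, "#" and ";" inside a value are kept as
	// part of the value instead of starting a comment.
	cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true},
		[]byte("password = s3cret#not-a-comment\n")) // illustrative content
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Section("").Key("password").String()) // s3cret#not-a-comment
}
```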
@@ -219,6 +223,12 @@ func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) {
return LoadSources(LoadOptions{Insensitive: true}, source, others...)
}
+// ShadowLoad has exactly the same functionality as the Load function,
+// except that it allows shadow keys.
+func ShadowLoad(source interface{}, others ...interface{}) (*File, error) {
+ return LoadSources(LoadOptions{AllowShadows: true}, source, others...)
+}
+
// Empty returns an empty file object.
func Empty() *File {
// Ignore error here, we sure our data is good.
@@ -311,6 +321,11 @@ func (f *File) Sections() []*Section {
return sections
}
+// ChildSections returns a list of child sections of given section name.
+func (f *File) ChildSections(name string) []*Section {
+ return f.Section(name).ChildSections()
+}
+
// SectionStrings returns list of section names.
func (f *File) SectionStrings() []string {
list := make([]string, len(f.sectionList))
@@ -441,6 +456,7 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
}
alignSpaces := bytes.Repeat([]byte(" "), alignLength)
+ KEY_LIST:
for _, kname := range sec.keyList {
key := sec.Key(kname)
if len(key.Comment) > 0 {
@@ -467,31 +483,33 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
case strings.Contains(kname, "`"):
kname = `"""` + kname + `"""`
}
- if _, err = buf.WriteString(kname); err != nil {
- return 0, err
- }
- if key.isBooleanType {
- if kname != sec.keyList[len(sec.keyList)-1] {
- buf.WriteString(LineBreak)
+ for _, val := range key.ValueWithShadows() {
+ if _, err = buf.WriteString(kname); err != nil {
+ return 0, err
}
- continue
- }
- // Write out alignment spaces before "=" sign
- if PrettyFormat {
- buf.Write(alignSpaces[:alignLength-len(kname)])
- }
+ if key.isBooleanType {
+ if kname != sec.keyList[len(sec.keyList)-1] {
+ buf.WriteString(LineBreak)
+ }
+ continue KEY_LIST
+ }
- val := key.value
- // In case key value contains "\n", "`", "\"", "#" or ";"
- if strings.ContainsAny(val, "\n`") {
- val = `"""` + val + `"""`
- } else if strings.ContainsAny(val, "#;") {
- val = "`" + val + "`"
- }
- if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
- return 0, err
+ // Write out alignment spaces before "=" sign
+ if PrettyFormat {
+ buf.Write(alignSpaces[:alignLength-len(kname)])
+ }
+
+ // In case key value contains "\n", "`", "\"", "#" or ";"
+ if strings.ContainsAny(val, "\n`") {
+ val = `"""` + val + `"""`
+ } else if strings.ContainsAny(val, "#;") {
+ val = "`" + val + "`"
+ }
+ if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil {
+ return 0, err
+ }
}
}
diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go
index 9738c55..838356a 100644
--- a/vendor/github.com/go-ini/ini/key.go
+++ b/vendor/github.com/go-ini/ini/key.go
@@ -15,6 +15,7 @@
package ini
import (
+ "errors"
"fmt"
"strconv"
"strings"
@@ -29,9 +30,42 @@ type Key struct {
isAutoIncrement bool
isBooleanType bool
+ isShadow bool
+ shadows []*Key
+
Comment string
}
+// newKey simply returns a key object with the given values.
+func newKey(s *Section, name, val string) *Key {
+ return &Key{
+ s: s,
+ name: name,
+ value: val,
+ }
+}
+
+func (k *Key) addShadow(val string) error {
+ if k.isShadow {
+ return errors.New("cannot add shadow to another shadow key")
+ } else if k.isAutoIncrement || k.isBooleanType {
+ return errors.New("cannot add shadow to auto-increment or boolean key")
+ }
+
+ shadow := newKey(k.s, k.name, val)
+ shadow.isShadow = true
+ k.shadows = append(k.shadows, shadow)
+ return nil
+}
+
+// AddShadow adds a new shadow key to itself.
+func (k *Key) AddShadow(val string) error {
+ if !k.s.f.options.AllowShadows {
+ return errors.New("shadow key is not allowed")
+ }
+ return k.addShadow(val)
+}
+
// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv
type ValueMapper func(string) string
@@ -45,16 +79,29 @@ func (k *Key) Value() string {
return k.value
}
-// String returns string representation of value.
-func (k *Key) String() string {
- val := k.value
+// ValueWithShadows returns raw values of key and its shadows if any.
+func (k *Key) ValueWithShadows() []string {
+ if len(k.shadows) == 0 {
+ return []string{k.value}
+ }
+ vals := make([]string, len(k.shadows)+1)
+ vals[0] = k.value
+ for i := range k.shadows {
+ vals[i+1] = k.shadows[i].value
+ }
+ return vals
+}
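A minimal sketch of how shadow keys surface through ShadowLoad and ValueWithShadows; the INI fragment is illustrative.

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

func main() {
	data := []byte("[ssh]\nIdentityFile = ~/.ssh/id_rsa\nIdentityFile = ~/.ssh/id_ed25519\n")

	// ShadowLoad keeps every occurrence of a repeated key instead of
	// overwriting the earlier value.
	cfg, err := ini.ShadowLoad(data)
	if err != nil {
		log.Fatal(err)
	}

	// ValueWithShadows returns the first value followed by all shadows.
	for _, v := range cfg.Section("ssh").Key("IdentityFile").ValueWithShadows() {
		fmt.Println(v)
	}
}
```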
+
+// transformValue takes a raw value and transforms it into its final string.
+func (k *Key) transformValue(val string) string {
if k.s.f.ValueMapper != nil {
val = k.s.f.ValueMapper(val)
}
- if strings.Index(val, "%") == -1 {
+
+ // Fail fast if no indicator char is found for a recursive value
+ if !strings.Contains(val, "%") {
return val
}
-
for i := 0; i < _DEPTH_VALUES; i++ {
vr := varPattern.FindString(val)
if len(vr) == 0 {
@@ -78,6 +125,11 @@ func (k *Key) String() string {
return val
}
+// String returns string representation of value.
+func (k *Key) String() string {
+ return k.transformValue(k.value)
+}
+
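Since transformValue is what resolves recursive `%(name)s` references (up to _DEPTH_VALUES levels), here is a short, assumed example of that substitution; the syntax follows go-ini's documented behaviour rather than anything introduced by this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

func main() {
	data := []byte("[paths]\nhome = /home/git\nrepo = %(home)s/repositories\n")

	cfg, err := ini.Load(data)
	if err != nil {
		log.Fatal(err)
	}

	// String applies transformValue, so the %(home)s reference is expanded.
	fmt.Println(cfg.Section("paths").Key("repo").String()) // /home/git/repositories
}
```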
// Validate accepts a validate function which can
// return a modified result as the key value.
func (k *Key) Validate(fn func(string) string) string {
@@ -394,45 +446,65 @@ func (k *Key) Strings(delim string) []string {
vals := strings.Split(str, delim)
for i := range vals {
+ // vals[i] = k.transformValue(strings.TrimSpace(vals[i]))
vals[i] = strings.TrimSpace(vals[i])
}
return vals
}
+// StringsWithShadows returns list of string divided by given delimiter.
+// Shadows will also be appended if any.
+func (k *Key) StringsWithShadows(delim string) []string {
+ vals := k.ValueWithShadows()
+ results := make([]string, 0, len(vals)*2)
+ for i := range vals {
+ if len(vals[i]) == 0 {
+ continue
+ }
+
+ results = append(results, strings.Split(vals[i], delim)...)
+ }
+
+ for i := range results {
+ results[i] = k.transformValue(strings.TrimSpace(results[i]))
+ }
+ return results
+}
+
// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Float64s(delim string) []float64 {
- vals, _ := k.getFloat64s(delim, true, false)
+ vals, _ := k.parseFloat64s(k.Strings(delim), true, false)
return vals
}
// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Ints(delim string) []int {
- vals, _ := k.getInts(delim, true, false)
+ vals, _ := k.parseInts(k.Strings(delim), true, false)
return vals
}
// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Int64s(delim string) []int64 {
- vals, _ := k.getInt64s(delim, true, false)
+ vals, _ := k.parseInt64s(k.Strings(delim), true, false)
return vals
}
// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uints(delim string) []uint {
- vals, _ := k.getUints(delim, true, false)
+ vals, _ := k.parseUints(k.Strings(delim), true, false)
return vals
}
// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value.
func (k *Key) Uint64s(delim string) []uint64 {
- vals, _ := k.getUint64s(delim, true, false)
+ vals, _ := k.parseUint64s(k.Strings(delim), true, false)
return vals
}
// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC).
func (k *Key) TimesFormat(format, delim string) []time.Time {
- vals, _ := k.getTimesFormat(format, delim, true, false)
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false)
return vals
}
@@ -445,41 +517,41 @@ func (k *Key) Times(delim string) []time.Time {
// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then
// it will not be included to result list.
func (k *Key) ValidFloat64s(delim string) []float64 {
- vals, _ := k.getFloat64s(delim, false, false)
+ vals, _ := k.parseFloat64s(k.Strings(delim), false, false)
return vals
}
// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will
// not be included to result list.
func (k *Key) ValidInts(delim string) []int {
- vals, _ := k.getInts(delim, false, false)
+ vals, _ := k.parseInts(k.Strings(delim), false, false)
return vals
}
// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer,
// then it will not be included to result list.
func (k *Key) ValidInt64s(delim string) []int64 {
- vals, _ := k.getInt64s(delim, false, false)
+ vals, _ := k.parseInt64s(k.Strings(delim), false, false)
return vals
}
// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer,
// then it will not be included to result list.
func (k *Key) ValidUints(delim string) []uint {
- vals, _ := k.getUints(delim, false, false)
+ vals, _ := k.parseUints(k.Strings(delim), false, false)
return vals
}
// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned
// integer, then it will not be included to result list.
func (k *Key) ValidUint64s(delim string) []uint64 {
- vals, _ := k.getUint64s(delim, false, false)
+ vals, _ := k.parseUint64s(k.Strings(delim), false, false)
return vals
}
// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) ValidTimesFormat(format, delim string) []time.Time {
- vals, _ := k.getTimesFormat(format, delim, false, false)
+ vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false)
return vals
}
@@ -490,33 +562,33 @@ func (k *Key) ValidTimes(delim string) []time.Time {
// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictFloat64s(delim string) ([]float64, error) {
- return k.getFloat64s(delim, false, true)
+ return k.parseFloat64s(k.Strings(delim), false, true)
}
// StrictInts returns list of int divided by given delimiter or error on first invalid input.
func (k *Key) StrictInts(delim string) ([]int, error) {
- return k.getInts(delim, false, true)
+ return k.parseInts(k.Strings(delim), false, true)
}
// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictInt64s(delim string) ([]int64, error) {
- return k.getInt64s(delim, false, true)
+ return k.parseInt64s(k.Strings(delim), false, true)
}
// StrictUints returns list of uint divided by given delimiter or error on first invalid input.
func (k *Key) StrictUints(delim string) ([]uint, error) {
- return k.getUints(delim, false, true)
+ return k.parseUints(k.Strings(delim), false, true)
}
// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input.
func (k *Key) StrictUint64s(delim string) ([]uint64, error) {
- return k.getUint64s(delim, false, true)
+ return k.parseUint64s(k.Strings(delim), false, true)
}
// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter
// or error on first invalid input.
func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) {
- return k.getTimesFormat(format, delim, false, true)
+ return k.parseTimesFormat(format, k.Strings(delim), false, true)
}
// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter
@@ -525,9 +597,8 @@ func (k *Key) StrictTimes(delim string) ([]time.Time, error) {
return k.StrictTimesFormat(time.RFC3339, delim)
}
-// getFloat64s returns list of float64 divided by given delimiter.
-func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) {
- strs := k.Strings(delim)
+// parseFloat64s transforms strings to float64s.
+func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) {
vals := make([]float64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseFloat(str, 64)
@@ -541,9 +612,8 @@ func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]flo
return vals, nil
}
-// getInts returns list of int divided by given delimiter.
-func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) {
- strs := k.Strings(delim)
+// parseInts transforms strings to ints.
+func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) {
vals := make([]int, 0, len(strs))
for _, str := range strs {
val, err := strconv.Atoi(str)
@@ -557,9 +627,8 @@ func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, er
return vals, nil
}
-// getInt64s returns list of int64 divided by given delimiter.
-func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) {
- strs := k.Strings(delim)
+// parseInt64s transforms strings to int64s.
+func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) {
vals := make([]int64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseInt(str, 10, 64)
@@ -573,9 +642,8 @@ func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64
return vals, nil
}
-// getUints returns list of uint divided by given delimiter.
-func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) {
- strs := k.Strings(delim)
+// parseUints transforms strings to uints.
+func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) {
vals := make([]uint, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 10, 0)
@@ -589,9 +657,8 @@ func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint,
return vals, nil
}
-// getUint64s returns list of uint64 divided by given delimiter.
-func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
- strs := k.Strings(delim)
+// parseUint64s transforms strings to uint64s.
+func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) {
vals := make([]uint64, 0, len(strs))
for _, str := range strs {
val, err := strconv.ParseUint(str, 10, 64)
@@ -605,9 +672,8 @@ func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint
return vals, nil
}
-// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter.
-func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
- strs := k.Strings(delim)
+// parseTimesFormat transforms strings to times in given format.
+func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) {
vals := make([]time.Time, 0, len(strs))
for _, str := range strs {
val, err := time.Parse(format, str)
diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go
index 01ff20a..6c0b107 100644
--- a/vendor/github.com/go-ini/ini/parser.go
+++ b/vendor/github.com/go-ini/ini/parser.go
@@ -193,7 +193,7 @@ func hasSurroundedQuote(in string, quote byte) bool {
strings.IndexByte(in[1:], quote) == len(in)-2
}
-func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) {
+func (p *parser) readValue(in []byte, ignoreContinuation, ignoreInlineComment bool) (string, error) {
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
if len(line) == 0 {
return "", nil
@@ -217,18 +217,21 @@ func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) {
return line[startIdx : pos+startIdx], nil
}
- // Won't be able to reach here if value only contains whitespace.
+ // Won't be able to reach here if value only contains whitespace
line = strings.TrimSpace(line)
- // Check continuation lines when desired.
+ // Check continuation lines when desired
if !ignoreContinuation && line[len(line)-1] == '\\' {
return p.readContinuationLines(line[:len(line)-1])
}
- i := strings.IndexAny(line, "#;")
- if i > -1 {
- p.comment.WriteString(line[i:])
- line = strings.TrimSpace(line[:i])
+ // Check whether inline comments should be ignored
+ if !ignoreInlineComment {
+ i := strings.IndexAny(line, "#;")
+ if i > -1 {
+ p.comment.WriteString(line[i:])
+ line = strings.TrimSpace(line[:i])
+ }
}
// Trim single quotes
@@ -318,7 +321,7 @@ func (f *File) parse(reader io.Reader) (err error) {
if err != nil {
// Treat as boolean key when desired, and whole line is key name.
if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys {
- kname, err := p.readValue(line, f.options.IgnoreContinuation)
+ kname, err := p.readValue(line, f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
if err != nil {
return err
}
@@ -341,17 +344,16 @@ func (f *File) parse(reader io.Reader) (err error) {
p.count++
}
- key, err := section.NewKey(kname, "")
+ value, err := p.readValue(line[offset:], f.options.IgnoreContinuation, f.options.IgnoreInlineComment)
if err != nil {
return err
}
- key.isAutoIncrement = isAutoIncr
- value, err := p.readValue(line[offset:], f.options.IgnoreContinuation)
+ key, err := section.NewKey(kname, value)
if err != nil {
return err
}
- key.SetValue(value)
+ key.isAutoIncrement = isAutoIncr
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
}
diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go
index 806f149..94f7375 100644
--- a/vendor/github.com/go-ini/ini/section.go
+++ b/vendor/github.com/go-ini/ini/section.go
@@ -68,16 +68,18 @@ func (s *Section) NewKey(name, val string) (*Key, error) {
}
if inSlice(name, s.keyList) {
- s.keys[name].value = val
+ if s.f.options.AllowShadows {
+ if err := s.keys[name].addShadow(val); err != nil {
+ return nil, err
+ }
+ } else {
+ s.keys[name].value = val
+ }
return s.keys[name], nil
}
s.keyList = append(s.keyList, name)
- s.keys[name] = &Key{
- s: s,
- name: name,
- value: val,
- }
+ s.keys[name] = newKey(s, name, val)
s.keysHash[name] = val
return s.keys[name], nil
}
@@ -230,3 +232,17 @@ func (s *Section) DeleteKey(name string) {
}
}
}
+
+// ChildSections returns a list of child sections of current section.
+// For example, "[parent.child1]" and "[parent.child12]" are child sections
+// of section "[parent]".
+func (s *Section) ChildSections() []*Section {
+ prefix := s.name + "."
+ children := make([]*Section, 0, 3)
+ for _, name := range s.f.sectionList {
+ if strings.HasPrefix(name, prefix) {
+ children = append(children, s.f.sections[name])
+ }
+ }
+ return children
+}
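A quick, assumed usage of the new ChildSections helper; the section names and keys are invented.

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

func main() {
	data := []byte("[server]\n[server.http]\naddr = :80\n[server.https]\naddr = :443\n")

	cfg, err := ini.Load(data)
	if err != nil {
		log.Fatal(err)
	}

	// ChildSections matches every section whose name starts with "server.".
	for _, child := range cfg.Section("server").ChildSections() {
		fmt.Println(child.Name(), child.Key("addr").String())
	}
}
```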
diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go
index 5ef38d8..031c78b 100644
--- a/vendor/github.com/go-ini/ini/struct.go
+++ b/vendor/github.com/go-ini/ini/struct.go
@@ -78,8 +78,14 @@ func parseDelim(actual string) string {
var reflectTime = reflect.TypeOf(time.Now()).Kind()
// setSliceWithProperType sets proper values to slice based on its type.
-func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
- strs := key.Strings(delim)
+func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow bool) error {
+ var strs []string
+ if allowShadow {
+ strs = key.StringsWithShadows(delim)
+ } else {
+ strs = key.Strings(delim)
+ }
+
numVals := len(strs)
if numVals == 0 {
return nil
@@ -92,17 +98,17 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
case reflect.String:
vals = strs
case reflect.Int:
- vals = key.Ints(delim)
+ vals, _ = key.parseInts(strs, true, false)
case reflect.Int64:
- vals = key.Int64s(delim)
+ vals, _ = key.parseInt64s(strs, true, false)
case reflect.Uint:
- vals = key.Uints(delim)
+ vals, _ = key.parseUints(strs, true, false)
case reflect.Uint64:
- vals = key.Uint64s(delim)
+ vals, _ = key.parseUint64s(strs, true, false)
case reflect.Float64:
- vals = key.Float64s(delim)
+ vals, _ = key.parseFloat64s(strs, true, false)
case reflectTime:
- vals = key.Times(delim)
+ vals, _ = key.parseTimesFormat(time.RFC3339, strs, true, false)
default:
return fmt.Errorf("unsupported type '[]%s'", sliceOf)
}
@@ -133,7 +139,7 @@ func setSliceWithProperType(key *Key, field reflect.Value, delim string) error {
// setWithProperType sets proper value to field based on its type,
// but it does not return an error for failed parsing,
// because we want to use the default value that is already assigned to the struct.
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
+func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow bool) error {
switch t.Kind() {
case reflect.String:
if len(key.String()) == 0 {
@@ -187,13 +193,25 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
}
field.Set(reflect.ValueOf(timeVal))
case reflect.Slice:
- return setSliceWithProperType(key, field, delim)
+ return setSliceWithProperType(key, field, delim, allowShadow)
default:
return fmt.Errorf("unsupported type '%s'", t)
}
return nil
}
+func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) {
+ opts := strings.SplitN(tag, ",", 3)
+ rawName = opts[0]
+ if len(opts) > 1 {
+ omitEmpty = opts[1] == "omitempty"
+ }
+ if len(opts) > 2 {
+ allowShadow = opts[2] == "allowshadow"
+ }
+ return rawName, omitEmpty, allowShadow
+}
+
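To show how parseTagOptions and the allowShadow path fit together, a hedged sketch mapping a repeated key into a slice field; the struct, tag, and data are assumptions built on the APIs added in this diff.

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ini/ini"
)

// Hypothetical config struct; the third tag option enables shadow-aware
// slice mapping for this field.
type SSHConfig struct {
	IdentityFiles []string `ini:"IdentityFile,,allowshadow"`
}

func main() {
	data := []byte("[ssh]\nIdentityFile = ~/.ssh/id_rsa\nIdentityFile = ~/.ssh/id_ed25519\n")

	cfg, err := ini.ShadowLoad(data)
	if err != nil {
		log.Fatal(err)
	}

	var c SSHConfig
	if err := cfg.Section("ssh").MapTo(&c); err != nil {
		log.Fatal(err)
	}
	// Both values are collected via StringsWithShadows.
	fmt.Println(c.IdentityFiles)
}
```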
func (s *Section) mapTo(val reflect.Value) error {
if val.Kind() == reflect.Ptr {
val = val.Elem()
@@ -209,8 +227,8 @@ func (s *Section) mapTo(val reflect.Value) error {
continue
}
- opts := strings.SplitN(tag, ",", 2) // strip off possible omitempty
- fieldName := s.parseFieldName(tpField.Name, opts[0])
+ rawName, _, allowShadow := parseTagOptions(tag)
+ fieldName := s.parseFieldName(tpField.Name, rawName)
if len(fieldName) == 0 || !field.CanSet() {
continue
}
@@ -231,7 +249,8 @@ func (s *Section) mapTo(val reflect.Value) error {
}
if key, err := s.GetKey(fieldName); err == nil {
- if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
+ delim := parseDelim(tpField.Tag.Get("delim"))
+ if err = setWithProperType(tpField.Type, key, field, delim, allowShadow); err != nil {
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
}
}
diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS
index 1003707..e47e542 100644
--- a/vendor/github.com/go-sql-driver/mysql/AUTHORS
+++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS
@@ -25,6 +25,7 @@ Hanno Braun <mail at hannobraun.com>
Henri Yandell <flamefew at gmail.com>
Hirotaka Yamamoto <ymmt2005 at gmail.com>
INADA Naoki <songofacandy at gmail.com>
+Jacek Szwec <szwec.jacek at gmail.com>
James Harr <james.harr at gmail.com>
Jian Zhen <zhenjl at gmail.com>
Joshua Prunier <joshua.prunier at gmail.com>
@@ -41,6 +42,7 @@ Michael Woolnough <michael.woolnough at gmail.com>
Nicola Peduzzi <thenikso at gmail.com>
Olivier Mengué <dolmen at cpan.org>
Paul Bonser <misterpib at gmail.com>
+Peter Schultz <peter.schultz at classmarkets.com>
Runrioter Wung <runrioter at gmail.com>
Soroush Pour <me at soroushjp.com>
Stan Putrya <root.vagner at gmail.com>
diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md
index 6452038..a060e3c 100644
--- a/vendor/github.com/go-sql-driver/mysql/README.md
+++ b/vendor/github.com/go-sql-driver/mysql/README.md
@@ -15,6 +15,7 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
* [Address](#address)
* [Parameters](#parameters)
* [Examples](#examples)
+ * [Connection pool and timeouts](#connection-pool-and-timeouts)
* [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
* [time.Time support](#timetime-support)
* [Unicode support](#unicode-support)
@@ -260,11 +261,11 @@ Default: false
##### `readTimeout`
```
-Type: decimal number
+Type: duration
Default: 0
```
-I/O read timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
##### `strict`
@@ -283,11 +284,11 @@ By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://d
##### `timeout`
```
-Type: decimal number
+Type: duration
Default: OS default
```
-*Driver* side connection timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
+Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
##### `tls`
@@ -302,11 +303,11 @@ Default: false
##### `writeTimeout`
```
-Type: decimal number
+Type: duration
Default: 0
```
-I/O write timeout. The value must be a decimal number with an unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
+I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*.
##### System Variables
@@ -380,6 +381,11 @@ No Database preselected:
user:password@/
```
+
+### Connection pool and timeouts
+The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively.
+
+
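Tying the README changes together, a short example of configuring the per-connection timeouts in the DSN and the pool via database/sql; host, credentials, and limits are placeholders.

```go
package main

import (
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// timeout (dial), readTimeout and writeTimeout are per-connection DSN
	// parameters; all three take duration values such as "30s" or "1m30s".
	dsn := "user:password@tcp(127.0.0.1:3306)/dbname?timeout=5s&readTimeout=30s&writeTimeout=30s"

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pool sizing and connection lifetime are handled by database/sql,
	// not by the driver.
	db.SetMaxOpenConns(10)
	db.SetMaxIdleConns(5)
	db.SetConnMaxLifetime(time.Minute)

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```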
### `LOAD DATA LOCAL INFILE` support
For this feature you need direct access to the package. Therefore you must change the import path (no `_`):
```go
diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go
index d82c728..08e5fad 100644
--- a/vendor/github.com/go-sql-driver/mysql/connection.go
+++ b/vendor/github.com/go-sql-driver/mysql/connection.go
@@ -10,6 +10,7 @@ package mysql
import (
"database/sql/driver"
+ "io"
"net"
"strconv"
"strings"
@@ -289,22 +290,29 @@ func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, err
// Internal function to execute commands
func (mc *mysqlConn) exec(query string) error {
// Send command
- err := mc.writeCommandPacketStr(comQuery, query)
- if err != nil {
+ if err := mc.writeCommandPacketStr(comQuery, query); err != nil {
return err
}
// Read Result
resLen, err := mc.readResultSetHeaderPacket()
- if err == nil && resLen > 0 {
- if err = mc.readUntilEOF(); err != nil {
+ if err != nil {
+ return err
+ }
+
+ if resLen > 0 {
+ // columns
+ if err := mc.readUntilEOF(); err != nil {
return err
}
- err = mc.readUntilEOF()
+ // rows
+ if err := mc.readUntilEOF(); err != nil {
+ return err
+ }
}
- return err
+ return mc.discardResults()
}
func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
@@ -335,11 +343,17 @@ func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, erro
rows.mc = mc
if resLen == 0 {
- // no columns, no more data
- return emptyRows{}, nil
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
}
// Columns
- rows.columns, err = mc.readColumns(resLen)
+ rows.rs.columns, err = mc.readColumns(resLen)
return rows, err
}
}
@@ -359,7 +373,7 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
if err == nil {
rows := new(textRows)
rows.mc = mc
- rows.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
+ rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}}
if resLen > 0 {
// Columns
diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go
index aafe979..41b4d3d 100644
--- a/vendor/github.com/go-sql-driver/mysql/packets.go
+++ b/vendor/github.com/go-sql-driver/mysql/packets.go
@@ -584,8 +584,8 @@ func (mc *mysqlConn) handleOkPacket(data []byte) error {
// server_status [2 bytes]
mc.status = readStatus(data[1+n+m : 1+n+m+2])
- if err := mc.discardResults(); err != nil {
- return err
+ if mc.status&statusMoreResultsExists != 0 {
+ return nil
}
// warning count [2 bytes]
@@ -698,6 +698,10 @@ func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) {
func (rows *textRows) readRow(dest []driver.Value) error {
mc := rows.mc
+ if rows.rs.done {
+ return io.EOF
+ }
+
data, err := mc.readPacket()
if err != nil {
return err
@@ -707,15 +711,11 @@ func (rows *textRows) readRow(dest []driver.Value) error {
if data[0] == iEOF && len(data) == 5 {
// server_status [2 bytes]
rows.mc.status = readStatus(data[3:])
- err = rows.mc.discardResults()
- if err == nil {
- err = io.EOF
- } else {
- // connection unusable
- rows.mc.Close()
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
}
- rows.mc = nil
- return err
+ return io.EOF
}
if data[0] == iERR {
rows.mc = nil
@@ -736,7 +736,7 @@ func (rows *textRows) readRow(dest []driver.Value) error {
if !mc.parseTime {
continue
} else {
- switch rows.columns[i].fieldType {
+ switch rows.rs.columns[i].fieldType {
case fieldTypeTimestamp, fieldTypeDateTime,
fieldTypeDate, fieldTypeNewDate:
dest[i], err = parseDateTime(
@@ -1097,8 +1097,6 @@ func (mc *mysqlConn) discardResults() error {
if err := mc.readUntilEOF(); err != nil {
return err
}
- } else {
- mc.status &^= statusMoreResultsExists
}
}
return nil
@@ -1116,15 +1114,11 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
// EOF Packet
if data[0] == iEOF && len(data) == 5 {
rows.mc.status = readStatus(data[3:])
- err = rows.mc.discardResults()
- if err == nil {
- err = io.EOF
- } else {
- // connection unusable
- rows.mc.Close()
+ rows.rs.done = true
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
}
- rows.mc = nil
- return err
+ return io.EOF
}
rows.mc = nil
@@ -1145,14 +1139,14 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
}
// Convert to byte-coded string
- switch rows.columns[i].fieldType {
+ switch rows.rs.columns[i].fieldType {
case fieldTypeNULL:
dest[i] = nil
continue
// Numeric Types
case fieldTypeTiny:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
dest[i] = int64(data[pos])
} else {
dest[i] = int64(int8(data[pos]))
@@ -1161,7 +1155,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeShort, fieldTypeYear:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2]))
} else {
dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2])))
@@ -1170,7 +1164,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeInt24, fieldTypeLong:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4]))
} else {
dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4])))
@@ -1179,7 +1173,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
continue
case fieldTypeLongLong:
- if rows.columns[i].flags&flagUnsigned != 0 {
+ if rows.rs.columns[i].flags&flagUnsigned != 0 {
val := binary.LittleEndian.Uint64(data[pos : pos+8])
if val > math.MaxInt64 {
dest[i] = uint64ToString(val)
@@ -1233,10 +1227,10 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
case isNull:
dest[i] = nil
continue
- case rows.columns[i].fieldType == fieldTypeTime:
+ case rows.rs.columns[i].fieldType == fieldTypeTime:
// database/sql does not support an equivalent to TIME, return a string
var dstlen uint8
- switch decimals := rows.columns[i].decimals; decimals {
+ switch decimals := rows.rs.columns[i].decimals; decimals {
case 0x00, 0x1f:
dstlen = 8
case 1, 2, 3, 4, 5, 6:
@@ -1244,7 +1238,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
default:
return fmt.Errorf(
"protocol error, illegal decimals value %d",
- rows.columns[i].decimals,
+ rows.rs.columns[i].decimals,
)
}
dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen, true)
@@ -1252,10 +1246,10 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc)
default:
var dstlen uint8
- if rows.columns[i].fieldType == fieldTypeDate {
+ if rows.rs.columns[i].fieldType == fieldTypeDate {
dstlen = 10
} else {
- switch decimals := rows.columns[i].decimals; decimals {
+ switch decimals := rows.rs.columns[i].decimals; decimals {
case 0x00, 0x1f:
dstlen = 19
case 1, 2, 3, 4, 5, 6:
@@ -1263,7 +1257,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
default:
return fmt.Errorf(
"protocol error, illegal decimals value %d",
- rows.columns[i].decimals,
+ rows.rs.columns[i].decimals,
)
}
}
@@ -1279,7 +1273,7 @@ func (rows *binaryRows) readRow(dest []driver.Value) error {
// Please report if this happens!
default:
- return fmt.Errorf("unknown field type %d", rows.columns[i].fieldType)
+ return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType)
}
}
diff --git a/vendor/github.com/go-sql-driver/mysql/rows.go b/vendor/github.com/go-sql-driver/mysql/rows.go
index c08255e..900f548 100644
--- a/vendor/github.com/go-sql-driver/mysql/rows.go
+++ b/vendor/github.com/go-sql-driver/mysql/rows.go
@@ -21,40 +21,49 @@ type mysqlField struct {
decimals byte
}
-type mysqlRows struct {
- mc *mysqlConn
+type resultSet struct {
columns []mysqlField
+ done bool
+}
+
+type mysqlRows struct {
+ mc *mysqlConn
+ rs resultSet
}
type binaryRows struct {
mysqlRows
+ // stmtCols is a pointer to the statement's cached columns for different
+ // result sets.
+ stmtCols *[][]mysqlField
+ // i is the index of the current result set. It is used to fetch the
+ // proper columns from stmtCols.
+ i int
}
type textRows struct {
mysqlRows
}
-type emptyRows struct{}
-
func (rows *mysqlRows) Columns() []string {
- columns := make([]string, len(rows.columns))
+ columns := make([]string, len(rows.rs.columns))
if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias {
for i := range columns {
- if tableName := rows.columns[i].tableName; len(tableName) > 0 {
- columns[i] = tableName + "." + rows.columns[i].name
+ if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 {
+ columns[i] = tableName + "." + rows.rs.columns[i].name
} else {
- columns[i] = rows.columns[i].name
+ columns[i] = rows.rs.columns[i].name
}
}
} else {
for i := range columns {
- columns[i] = rows.columns[i].name
+ columns[i] = rows.rs.columns[i].name
}
}
return columns
}
-func (rows *mysqlRows) Close() error {
+func (rows *mysqlRows) Close() (err error) {
mc := rows.mc
if mc == nil {
return nil
@@ -64,7 +73,9 @@ func (rows *mysqlRows) Close() error {
}
// Remove unread packets from stream
- err := mc.readUntilEOF()
+ if !rows.rs.done {
+ err = mc.readUntilEOF()
+ }
if err == nil {
if err = mc.discardResults(); err != nil {
return err
@@ -75,6 +86,73 @@ func (rows *mysqlRows) Close() error {
return err
}
+func (rows *mysqlRows) HasNextResultSet() (b bool) {
+ if rows.mc == nil {
+ return false
+ }
+ return rows.mc.status&statusMoreResultsExists != 0
+}
+
+func (rows *mysqlRows) nextResultSet() (int, error) {
+ if rows.mc == nil {
+ return 0, io.EOF
+ }
+ if rows.mc.netConn == nil {
+ return 0, ErrInvalidConn
+ }
+
+ // Remove unread packets from stream
+ if !rows.rs.done {
+ if err := rows.mc.readUntilEOF(); err != nil {
+ return 0, err
+ }
+ rows.rs.done = true
+ }
+
+ if !rows.HasNextResultSet() {
+ rows.mc = nil
+ return 0, io.EOF
+ }
+ rows.rs = resultSet{}
+ return rows.mc.readResultSetHeaderPacket()
+}
+
+func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) {
+ for {
+ resLen, err := rows.nextResultSet()
+ if err != nil {
+ return 0, err
+ }
+
+ if resLen > 0 {
+ return resLen, nil
+ }
+
+ rows.rs.done = true
+ }
+}
+
+func (rows *binaryRows) NextResultSet() (err error) {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ // get columns, if not cached, read them and cache them.
+ if rows.i >= len(*rows.stmtCols) {
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ *rows.stmtCols = append(*rows.stmtCols, rows.rs.columns)
+ } else {
+ rows.rs.columns = (*rows.stmtCols)[rows.i]
+ if err := rows.mc.readUntilEOF(); err != nil {
+ return err
+ }
+ }
+
+ rows.i++
+ return nil
+}
+
func (rows *binaryRows) Next(dest []driver.Value) error {
if mc := rows.mc; mc != nil {
if mc.netConn == nil {
@@ -87,6 +165,16 @@ func (rows *binaryRows) Next(dest []driver.Value) error {
return io.EOF
}
+func (rows *textRows) NextResultSet() (err error) {
+ resLen, err := rows.nextNotEmptyResultSet()
+ if err != nil {
+ return err
+ }
+
+ rows.rs.columns, err = rows.mc.readColumns(resLen)
+ return err
+}
+
func (rows *textRows) Next(dest []driver.Value) error {
if mc := rows.mc; mc != nil {
if mc.netConn == nil {
@@ -98,15 +186,3 @@ func (rows *textRows) Next(dest []driver.Value) error {
}
return io.EOF
}
-
-func (rows emptyRows) Columns() []string {
- return nil
-}
-
-func (rows emptyRows) Close() error {
- return nil
-}
-
-func (rows emptyRows) Next(dest []driver.Value) error {
- return io.EOF
-}
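The resultSet/NextResultSet plumbing above is what lets callers walk multiple result sets; a hedged sketch, assuming a server response with more than one result set (for example from a stored procedure) and the Go 1.8+ database/sql API.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/dbname") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// A stored procedure is one common source of multiple result sets.
	rows, err := db.Query("CALL report()") // hypothetical procedure returning single-column result sets
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for {
		for rows.Next() {
			var v string
			if err := rows.Scan(&v); err != nil {
				log.Fatal(err)
			}
			fmt.Println(v)
		}
		// NextResultSet advances to the next result set, if any; it returns
		// false when there are no more.
		if !rows.NextResultSet() {
			break
		}
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}
```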
diff --git a/vendor/github.com/go-sql-driver/mysql/statement.go b/vendor/github.com/go-sql-driver/mysql/statement.go
index 7f9b045..b887716 100644
--- a/vendor/github.com/go-sql-driver/mysql/statement.go
+++ b/vendor/github.com/go-sql-driver/mysql/statement.go
@@ -11,6 +11,7 @@ package mysql
import (
"database/sql/driver"
"fmt"
+ "io"
"reflect"
"strconv"
)
@@ -19,7 +20,7 @@ type mysqlStmt struct {
mc *mysqlConn
id uint32
paramCount int
- columns []mysqlField // cached from the first query
+ columns [][]mysqlField // cached from the first query
}
func (stmt *mysqlStmt) Close() error {
@@ -62,26 +63,30 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
// Read Result
resLen, err := mc.readResultSetHeaderPacket()
- if err == nil {
- if resLen > 0 {
- // Columns
- err = mc.readUntilEOF()
- if err != nil {
- return nil, err
- }
+ if err != nil {
+ return nil, err
+ }
- // Rows
- err = mc.readUntilEOF()
+ if resLen > 0 {
+ // Columns
+ if err = mc.readUntilEOF(); err != nil {
+ return nil, err
}
- if err == nil {
- return &mysqlResult{
- affectedRows: int64(mc.affectedRows),
- insertId: int64(mc.insertId),
- }, nil
+
+ // Rows
+ if err := mc.readUntilEOF(); err != nil {
+ return nil, err
}
}
- return nil, err
+ if err := mc.discardResults(); err != nil {
+ return nil, err
+ }
+
+ return &mysqlResult{
+ affectedRows: int64(mc.affectedRows),
+ insertId: int64(mc.insertId),
+ }, nil
}
func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
@@ -104,18 +109,29 @@ func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
}
rows := new(binaryRows)
+ rows.stmtCols = &stmt.columns
if resLen > 0 {
rows.mc = mc
+ rows.i++
// Columns
// If not cached, read them and cache them
- if stmt.columns == nil {
- rows.columns, err = mc.readColumns(resLen)
- stmt.columns = rows.columns
+ if len(stmt.columns) == 0 {
+ rows.rs.columns, err = mc.readColumns(resLen)
+ stmt.columns = append(stmt.columns, rows.rs.columns)
} else {
- rows.columns = stmt.columns
+ rows.rs.columns = stmt.columns[0]
err = mc.readUntilEOF()
}
+ } else {
+ rows.rs.done = true
+
+ switch err := rows.NextResultSet(); err {
+ case nil, io.EOF:
+ return rows, nil
+ default:
+ return nil, err
+ }
}
return rows, err
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
new file mode 100644
index 0000000..4942418
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile
@@ -0,0 +1,39 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/
+# at src/google/protobuf/descriptor.proto
+regenerate:
+ echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION
+ protoc --go_out=. -I$(HOME)/src/protobuf/src $(HOME)/src/protobuf/src/google/protobuf/descriptor.proto && \
+ sed 's,^package google_protobuf,package descriptor,' google/protobuf/descriptor.pb.go > \
+ $(GOPATH)/src/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go && \
+ rm -f google/protobuf/descriptor.pb.go
diff --git a/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
new file mode 100644
index 0000000..a1d8a76
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go
@@ -0,0 +1,2065 @@
+// Code generated by protoc-gen-go.
+// source: google/protobuf/descriptor.proto
+// DO NOT EDIT!
+
+/*
+Package descriptor is a generated protocol buffer package.
+
+It is generated from these files:
+ google/protobuf/descriptor.proto
+
+It has these top-level messages:
+ FileDescriptorSet
+ FileDescriptorProto
+ DescriptorProto
+ FieldDescriptorProto
+ OneofDescriptorProto
+ EnumDescriptorProto
+ EnumValueDescriptorProto
+ ServiceDescriptorProto
+ MethodDescriptorProto
+ FileOptions
+ MessageOptions
+ FieldOptions
+ OneofOptions
+ EnumOptions
+ EnumValueOptions
+ ServiceOptions
+ MethodOptions
+ UninterpretedOption
+ SourceCodeInfo
+ GeneratedCodeInfo
+*/
+package descriptor
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type FieldDescriptorProto_Type int32
+
+const (
+ // 0 is reserved for errors.
+ // Order is weird for historical reasons.
+ FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
+ FieldDescriptorProto_TYPE_FLOAT FieldDescriptorProto_Type = 2
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if
+ // negative values are likely.
+ FieldDescriptorProto_TYPE_INT64 FieldDescriptorProto_Type = 3
+ FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
+ // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if
+ // negative values are likely.
+ FieldDescriptorProto_TYPE_INT32 FieldDescriptorProto_Type = 5
+ FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
+ FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
+ FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8
+ FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9
+ FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10
+ FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
+ // New in version 2.
+ FieldDescriptorProto_TYPE_BYTES FieldDescriptorProto_Type = 12
+ FieldDescriptorProto_TYPE_UINT32 FieldDescriptorProto_Type = 13
+ FieldDescriptorProto_TYPE_ENUM FieldDescriptorProto_Type = 14
+ FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
+ FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
+ FieldDescriptorProto_TYPE_SINT32 FieldDescriptorProto_Type = 17
+ FieldDescriptorProto_TYPE_SINT64 FieldDescriptorProto_Type = 18
+)
+
+var FieldDescriptorProto_Type_name = map[int32]string{
+ 1: "TYPE_DOUBLE",
+ 2: "TYPE_FLOAT",
+ 3: "TYPE_INT64",
+ 4: "TYPE_UINT64",
+ 5: "TYPE_INT32",
+ 6: "TYPE_FIXED64",
+ 7: "TYPE_FIXED32",
+ 8: "TYPE_BOOL",
+ 9: "TYPE_STRING",
+ 10: "TYPE_GROUP",
+ 11: "TYPE_MESSAGE",
+ 12: "TYPE_BYTES",
+ 13: "TYPE_UINT32",
+ 14: "TYPE_ENUM",
+ 15: "TYPE_SFIXED32",
+ 16: "TYPE_SFIXED64",
+ 17: "TYPE_SINT32",
+ 18: "TYPE_SINT64",
+}
+var FieldDescriptorProto_Type_value = map[string]int32{
+ "TYPE_DOUBLE": 1,
+ "TYPE_FLOAT": 2,
+ "TYPE_INT64": 3,
+ "TYPE_UINT64": 4,
+ "TYPE_INT32": 5,
+ "TYPE_FIXED64": 6,
+ "TYPE_FIXED32": 7,
+ "TYPE_BOOL": 8,
+ "TYPE_STRING": 9,
+ "TYPE_GROUP": 10,
+ "TYPE_MESSAGE": 11,
+ "TYPE_BYTES": 12,
+ "TYPE_UINT32": 13,
+ "TYPE_ENUM": 14,
+ "TYPE_SFIXED32": 15,
+ "TYPE_SFIXED64": 16,
+ "TYPE_SINT32": 17,
+ "TYPE_SINT64": 18,
+}
+
+func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
+ p := new(FieldDescriptorProto_Type)
+ *p = x
+ return p
+}
+func (x FieldDescriptorProto_Type) String() string {
+ return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
+}
+func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
+ if err != nil {
+ return err
+ }
+ *x = FieldDescriptorProto_Type(value)
+ return nil
+}
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} }
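+
+// exampleEnumHelpers is an illustrative sketch and not part of the generated
+// output: it shows how Enum() yields the pointer value that an optional
+// proto2 field expects, and how String() maps the numeric value back to its
+// proto name ("TYPE_INT64"). The function name is hypothetical.
+func exampleEnumHelpers() {
+ f := &FieldDescriptorProto{
+ Name: proto.String("id"),
+ Type: FieldDescriptorProto_TYPE_INT64.Enum(),
+ }
+ _ = f.GetType().String()
+}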
+
+type FieldDescriptorProto_Label int32
+
+const (
+ // 0 is reserved for errors
+ FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
+ FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
+ FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
+)
+
+var FieldDescriptorProto_Label_name = map[int32]string{
+ 1: "LABEL_OPTIONAL",
+ 2: "LABEL_REQUIRED",
+ 3: "LABEL_REPEATED",
+}
+var FieldDescriptorProto_Label_value = map[string]int32{
+ "LABEL_OPTIONAL": 1,
+ "LABEL_REQUIRED": 2,
+ "LABEL_REPEATED": 3,
+}
+
+func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
+ p := new(FieldDescriptorProto_Label)
+ *p = x
+ return p
+}
+func (x FieldDescriptorProto_Label) String() string {
+ return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
+}
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
+ if err != nil {
+ return err
+ }
+ *x = FieldDescriptorProto_Label(value)
+ return nil
+}
+func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{3, 1}
+}
+
+// Generated classes can be optimized for speed or code size.
+type FileOptions_OptimizeMode int32
+
+const (
+ // Generate complete code for parsing, serialization,
+ // etc.
+ FileOptions_SPEED FileOptions_OptimizeMode = 1
+ // Use ReflectionOps to implement these methods.
+ FileOptions_CODE_SIZE FileOptions_OptimizeMode = 2
+ // Generate code using MessageLite and the lite runtime.
+ FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
+)
+
+var FileOptions_OptimizeMode_name = map[int32]string{
+ 1: "SPEED",
+ 2: "CODE_SIZE",
+ 3: "LITE_RUNTIME",
+}
+var FileOptions_OptimizeMode_value = map[string]int32{
+ "SPEED": 1,
+ "CODE_SIZE": 2,
+ "LITE_RUNTIME": 3,
+}
+
+func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
+ p := new(FileOptions_OptimizeMode)
+ *p = x
+ return p
+}
+func (x FileOptions_OptimizeMode) String() string {
+ return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
+}
+func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
+ if err != nil {
+ return err
+ }
+ *x = FileOptions_OptimizeMode(value)
+ return nil
+}
+func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} }
+
+type FieldOptions_CType int32
+
+const (
+ // Default mode.
+ FieldOptions_STRING FieldOptions_CType = 0
+ FieldOptions_CORD FieldOptions_CType = 1
+ FieldOptions_STRING_PIECE FieldOptions_CType = 2
+)
+
+var FieldOptions_CType_name = map[int32]string{
+ 0: "STRING",
+ 1: "CORD",
+ 2: "STRING_PIECE",
+}
+var FieldOptions_CType_value = map[string]int32{
+ "STRING": 0,
+ "CORD": 1,
+ "STRING_PIECE": 2,
+}
+
+func (x FieldOptions_CType) Enum() *FieldOptions_CType {
+ p := new(FieldOptions_CType)
+ *p = x
+ return p
+}
+func (x FieldOptions_CType) String() string {
+ return proto.EnumName(FieldOptions_CType_name, int32(x))
+}
+func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
+ if err != nil {
+ return err
+ }
+ *x = FieldOptions_CType(value)
+ return nil
+}
+func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} }
+
+type FieldOptions_JSType int32
+
+const (
+ // Use the default type.
+ FieldOptions_JS_NORMAL FieldOptions_JSType = 0
+ // Use JavaScript strings.
+ FieldOptions_JS_STRING FieldOptions_JSType = 1
+ // Use JavaScript numbers.
+ FieldOptions_JS_NUMBER FieldOptions_JSType = 2
+)
+
+var FieldOptions_JSType_name = map[int32]string{
+ 0: "JS_NORMAL",
+ 1: "JS_STRING",
+ 2: "JS_NUMBER",
+}
+var FieldOptions_JSType_value = map[string]int32{
+ "JS_NORMAL": 0,
+ "JS_STRING": 1,
+ "JS_NUMBER": 2,
+}
+
+func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
+ p := new(FieldOptions_JSType)
+ *p = x
+ return p
+}
+func (x FieldOptions_JSType) String() string {
+ return proto.EnumName(FieldOptions_JSType_name, int32(x))
+}
+func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
+ if err != nil {
+ return err
+ }
+ *x = FieldOptions_JSType(value)
+ return nil
+}
+func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 1} }
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+type FileDescriptorSet struct {
+ File []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} }
+func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorSet) ProtoMessage() {}
+func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
+
+func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
+ if m != nil {
+ return m.File
+ }
+ return nil
+}
+
+// Describes a complete .proto file.
+type FileDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
+ // Names of files imported by this file.
+ Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
+ // Indexes of the public imported files in the dependency list above.
+ PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
+ // Indexes of the weak imported files in the dependency list.
+ // For Google-internal migration only. Do not use.
+ WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+ // All top-level definitions in this file.
+ MessageType []*DescriptorProto `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
+ EnumType []*EnumDescriptorProto `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+ Service []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
+ Extension []*FieldDescriptorProto `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
+ Options *FileOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+ // This field contains optional information about the original source code.
+ // You may safely remove this entire field without harming runtime
+ // functionality of the descriptors -- the information is needed only by
+ // development tools.
+ SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
+ // The syntax of the proto file.
+ // The supported values are "proto2" and "proto3".
+ Syntax *string `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} }
+func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorProto) ProtoMessage() {}
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
+
+func (m *FileDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FileDescriptorProto) GetPackage() string {
+ if m != nil && m.Package != nil {
+ return *m.Package
+ }
+ return ""
+}
+
+func (m *FileDescriptorProto) GetDependency() []string {
+ if m != nil {
+ return m.Dependency
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetPublicDependency() []int32 {
+ if m != nil {
+ return m.PublicDependency
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetWeakDependency() []int32 {
+ if m != nil {
+ return m.WeakDependency
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
+ if m != nil {
+ return m.MessageType
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
+ if m != nil {
+ return m.EnumType
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
+ if m != nil {
+ return m.Service
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
+ if m != nil {
+ return m.Extension
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetOptions() *FileOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
+ if m != nil {
+ return m.SourceCodeInfo
+ }
+ return nil
+}
+
+func (m *FileDescriptorProto) GetSyntax() string {
+ if m != nil && m.Syntax != nil {
+ return *m.Syntax
+ }
+ return ""
+}
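+
+// listTopLevelMessages is an illustrative sketch and not part of the
+// generated output: it walks a FileDescriptorProto with the nil-safe getters
+// defined above and collects the names of the file's top-level message types.
+// The function name is hypothetical.
+func listTopLevelMessages(fd *FileDescriptorProto) []string {
+ var names []string
+ for _, msg := range fd.GetMessageType() {
+ names = append(names, msg.GetName())
+ }
+ return names
+}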
+
+// Describes a message type.
+type DescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Field []*FieldDescriptorProto `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+ Extension []*FieldDescriptorProto `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
+ NestedType []*DescriptorProto `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
+ EnumType []*EnumDescriptorProto `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+ ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
+ OneofDecl []*OneofDescriptorProto `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
+ Options *MessageOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
+ ReservedRange []*DescriptorProto_ReservedRange `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+ // Reserved field names, which may not be used by fields in the same message.
+ // A given name may only be reserved once.
+ ReservedName []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DescriptorProto) Reset() { *m = DescriptorProto{} }
+func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto) ProtoMessage() {}
+func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
+
+func (m *DescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
+ if m != nil {
+ return m.Field
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
+ if m != nil {
+ return m.Extension
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
+ if m != nil {
+ return m.NestedType
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
+ if m != nil {
+ return m.EnumType
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
+ if m != nil {
+ return m.ExtensionRange
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
+ if m != nil {
+ return m.OneofDecl
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetOptions() *MessageOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
+ if m != nil {
+ return m.ReservedRange
+ }
+ return nil
+}
+
+func (m *DescriptorProto) GetReservedName() []string {
+ if m != nil {
+ return m.ReservedName
+ }
+ return nil
+}
+
+type DescriptorProto_ExtensionRange struct {
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} }
+func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ExtensionRange) ProtoMessage() {}
+func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{2, 0}
+}
+
+func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+// Range of reserved tag numbers. Reserved tag numbers may not be used by
+// fields or extension ranges in the same message. Reserved ranges may
+// not overlap.
+type DescriptorProto_ReservedRange struct {
+ Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+ End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_ReservedRange{} }
+func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ReservedRange) ProtoMessage() {}
+func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{2, 1}
+}
+
+func (m *DescriptorProto_ReservedRange) GetStart() int32 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+// Describes a field within a message.
+type FieldDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Number *int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
+ Label *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
+ // If type_name is set, this need not be set. If both this and type_name
+ // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+ Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
+ // For message and enum types, this is the name of the type. If the name
+ // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping
+ // rules are used to find the type (i.e. first the nested types within this
+ // message are searched, then within the parent, on up to the root
+ // namespace).
+ TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
+ // For extensions, this is the name of the type being extended. It is
+ // resolved in the same manner as type_name.
+ Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
+ // For numeric types, contains the original text representation of the value.
+ // For booleans, "true" or "false".
+ // For strings, contains the default text contents (not escaped in any way).
+ // For bytes, contains the C escaped value. All bytes >= 128 are escaped.
+ // TODO(kenton): Base-64 encode?
+ DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
+ // If set, gives the index of a oneof in the containing type's oneof_decl
+ // list. This field is a member of that oneof.
+ OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
+ // JSON name of this field. The value is set by the protocol compiler. If the
+ // user has set a "json_name" option on this field, that option's value
+ // will be used. Otherwise, it's deduced from the field's name by converting
+ // it to camelCase.
+ JsonName *string `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
+ Options *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} }
+func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FieldDescriptorProto) ProtoMessage() {}
+func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
+
+func (m *FieldDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetNumber() int32 {
+ if m != nil && m.Number != nil {
+ return *m.Number
+ }
+ return 0
+}
+
+func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return FieldDescriptorProto_TYPE_DOUBLE
+}
+
+func (m *FieldDescriptorProto) GetTypeName() string {
+ if m != nil && m.TypeName != nil {
+ return *m.TypeName
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetExtendee() string {
+ if m != nil && m.Extendee != nil {
+ return *m.Extendee
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetDefaultValue() string {
+ if m != nil && m.DefaultValue != nil {
+ return *m.DefaultValue
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetOneofIndex() int32 {
+ if m != nil && m.OneofIndex != nil {
+ return *m.OneofIndex
+ }
+ return 0
+}
+
+func (m *FieldDescriptorProto) GetJsonName() string {
+ if m != nil && m.JsonName != nil {
+ return *m.JsonName
+ }
+ return ""
+}
+
+func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes a oneof.
+type OneofDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} }
+func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*OneofDescriptorProto) ProtoMessage() {}
+func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
+
+func (m *OneofDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *OneofDescriptorProto) GetOptions() *OneofOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes an enum type.
+type EnumDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} }
+func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto) ProtoMessage() {}
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
+
+func (m *EnumDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes a value within an enum.
+type EnumValueDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Number *int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+ Options *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} }
+func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumValueDescriptorProto) ProtoMessage() {}
+func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
+
+func (m *EnumValueDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *EnumValueDescriptorProto) GetNumber() int32 {
+ if m != nil && m.Number != nil {
+ return *m.Number
+ }
+ return 0
+}
+
+func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes a service.
+type ServiceDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Method []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+ Options *ServiceOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} }
+func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*ServiceDescriptorProto) ProtoMessage() {}
+func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
+
+func (m *ServiceDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
+ if m != nil {
+ return m.Method
+ }
+ return nil
+}
+
+func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+// Describes a method of a service.
+type MethodDescriptorProto struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ // Input and output type names. These are resolved in the same way as
+ // FieldDescriptorProto.type_name, but must refer to a message type.
+ InputType *string `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
+ OutputType *string `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
+ Options *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
+ // Identifies if client streams multiple client messages
+ ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
+ // Identifies if server streams multiple server messages
+ ServerStreaming *bool `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} }
+func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*MethodDescriptorProto) ProtoMessage() {}
+func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
+
+const Default_MethodDescriptorProto_ClientStreaming bool = false
+const Default_MethodDescriptorProto_ServerStreaming bool = false
+
+func (m *MethodDescriptorProto) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MethodDescriptorProto) GetInputType() string {
+ if m != nil && m.InputType != nil {
+ return *m.InputType
+ }
+ return ""
+}
+
+func (m *MethodDescriptorProto) GetOutputType() string {
+ if m != nil && m.OutputType != nil {
+ return *m.OutputType
+ }
+ return ""
+}
+
+func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *MethodDescriptorProto) GetClientStreaming() bool {
+ if m != nil && m.ClientStreaming != nil {
+ return *m.ClientStreaming
+ }
+ return Default_MethodDescriptorProto_ClientStreaming
+}
+
+func (m *MethodDescriptorProto) GetServerStreaming() bool {
+ if m != nil && m.ServerStreaming != nil {
+ return *m.ServerStreaming
+ }
+ return Default_MethodDescriptorProto_ServerStreaming
+}
+
+type FileOptions struct {
+ // Sets the Java package where classes generated from this .proto will be
+ // placed. By default, the proto package is used, but this is often
+ // inappropriate because proto packages do not normally start with backwards
+ // domain names.
+ JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
+ // If set, all the classes from the .proto file are wrapped in a single
+ // outer class with the given name. This applies to both Proto1
+ // (equivalent to the old "--one_java_file" option) and Proto2 (where
+ // a .proto always translates to a single class, but you may want to
+ // explicitly choose the class name).
+ JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+ // If set true, then the Java code generator will generate a separate .java
+ // file for each top-level message, enum, and service defined in the .proto
+ // file. Thus, these types will *not* be nested inside the outer class
+ // named by java_outer_classname. However, the outer class will still be
+ // generated to contain the file's getDescriptor() method as well as any
+ // top-level extensions defined in the file.
+ JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+ // This option does nothing.
+ JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"`
+ // If set true, then the Java2 code generator will generate code that
+ // throws an exception whenever an attempt is made to assign a non-UTF-8
+ // byte sequence to a string field.
+ // Message reflection will do the same.
+ // However, an extension field still accepts non-UTF-8 byte sequences.
+ // This option has no effect when used with the lite runtime.
+ JavaStringCheckUtf8 *bool `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+ OptimizeFor *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+ // Sets the Go package where structs generated from this .proto will be
+ // placed. If omitted, the Go package will be derived from the following:
+ // - The basename of the package import path, if provided.
+ // - Otherwise, the package statement in the .proto file, if present.
+ // - Otherwise, the basename of the .proto file, without extension.
+ GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+ // Should generic services be generated in each language? "Generic" services
+ // are not specific to any particular RPC system. They are generated by the
+ // main code generators in each language (without additional plugins).
+ // Generic services were the only kind of service generation supported by
+ // early versions of google.protobuf.
+ //
+ // Generic services are now considered deprecated in favor of using plugins
+ // that generate code specific to your particular RPC system. Therefore,
+ // these default to false. Old code which depends on generic services should
+ // explicitly set them to true.
+ CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+ JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+ PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+ // Is this file deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for everything in the file, or it will be completely ignored; in the very
+ // least, this is a formalization for deprecating files.
+ Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Enables the use of arenas for the proto messages in this file. This applies
+ // only to generated classes for C++.
+ CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+ // Sets the objective c class prefix which is prepended to all objective c
+ // generated classes from this .proto. There is no default.
+ ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+ // Namespace for generated classes; defaults to the package.
+ CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FileOptions) Reset() { *m = FileOptions{} }
+func (m *FileOptions) String() string { return proto.CompactTextString(m) }
+func (*FileOptions) ProtoMessage() {}
+func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
+
+var extRange_FileOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_FileOptions
+}
+
+const Default_FileOptions_JavaMultipleFiles bool = false
+const Default_FileOptions_JavaStringCheckUtf8 bool = false
+const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
+const Default_FileOptions_CcGenericServices bool = false
+const Default_FileOptions_JavaGenericServices bool = false
+const Default_FileOptions_PyGenericServices bool = false
+const Default_FileOptions_Deprecated bool = false
+const Default_FileOptions_CcEnableArenas bool = false
+
+func (m *FileOptions) GetJavaPackage() string {
+ if m != nil && m.JavaPackage != nil {
+ return *m.JavaPackage
+ }
+ return ""
+}
+
+func (m *FileOptions) GetJavaOuterClassname() string {
+ if m != nil && m.JavaOuterClassname != nil {
+ return *m.JavaOuterClassname
+ }
+ return ""
+}
+
+func (m *FileOptions) GetJavaMultipleFiles() bool {
+ if m != nil && m.JavaMultipleFiles != nil {
+ return *m.JavaMultipleFiles
+ }
+ return Default_FileOptions_JavaMultipleFiles
+}
+
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
+ if m != nil && m.JavaGenerateEqualsAndHash != nil {
+ return *m.JavaGenerateEqualsAndHash
+ }
+ return false
+}
+
+func (m *FileOptions) GetJavaStringCheckUtf8() bool {
+ if m != nil && m.JavaStringCheckUtf8 != nil {
+ return *m.JavaStringCheckUtf8
+ }
+ return Default_FileOptions_JavaStringCheckUtf8
+}
+
+func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
+ if m != nil && m.OptimizeFor != nil {
+ return *m.OptimizeFor
+ }
+ return Default_FileOptions_OptimizeFor
+}
+
+func (m *FileOptions) GetGoPackage() string {
+ if m != nil && m.GoPackage != nil {
+ return *m.GoPackage
+ }
+ return ""
+}
+
+func (m *FileOptions) GetCcGenericServices() bool {
+ if m != nil && m.CcGenericServices != nil {
+ return *m.CcGenericServices
+ }
+ return Default_FileOptions_CcGenericServices
+}
+
+func (m *FileOptions) GetJavaGenericServices() bool {
+ if m != nil && m.JavaGenericServices != nil {
+ return *m.JavaGenericServices
+ }
+ return Default_FileOptions_JavaGenericServices
+}
+
+func (m *FileOptions) GetPyGenericServices() bool {
+ if m != nil && m.PyGenericServices != nil {
+ return *m.PyGenericServices
+ }
+ return Default_FileOptions_PyGenericServices
+}
+
+func (m *FileOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_FileOptions_Deprecated
+}
+
+func (m *FileOptions) GetCcEnableArenas() bool {
+ if m != nil && m.CcEnableArenas != nil {
+ return *m.CcEnableArenas
+ }
+ return Default_FileOptions_CcEnableArenas
+}
+
+func (m *FileOptions) GetObjcClassPrefix() string {
+ if m != nil && m.ObjcClassPrefix != nil {
+ return *m.ObjcClassPrefix
+ }
+ return ""
+}
+
+func (m *FileOptions) GetCsharpNamespace() string {
+ if m != nil && m.CsharpNamespace != nil {
+ return *m.CsharpNamespace
+ }
+ return ""
+}
+
+func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
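+
+// exampleFileOptionDefaults is an illustrative sketch and not part of the
+// generated output: the nil-safe getters above fall back to the Default_*
+// constants, so an empty FileOptions still reports the declared defaults
+// (optimize_for = SPEED, the generic-service and deprecation flags false).
+// The function name is hypothetical.
+func exampleFileOptionDefaults() {
+ opts := &FileOptions{}
+ _ = opts.GetOptimizeFor() == FileOptions_SPEED // true: declared default
+ _ = opts.GetCcGenericServices() // false: declared default
+}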
+
+type MessageOptions struct {
+ // Set true to use the old proto1 MessageSet wire format for extensions.
+ // This is provided for backwards-compatibility with the MessageSet wire
+ // format. You should not use this for any other reason: It's less
+ // efficient, has fewer features, and is more complicated.
+ //
+ // The message must be defined exactly as follows:
+ // message Foo {
+ // option message_set_wire_format = true;
+ // extensions 4 to max;
+ // }
+ // Note that the message cannot have any defined fields; MessageSets only
+ // have extensions.
+ //
+ // All extensions of your type must be singular messages; e.g. they cannot
+ // be int32s, enums, or repeated messages.
+ //
+ // Because this is an option, the above two restrictions are not enforced by
+ // the protocol compiler.
+ MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+ // Disables the generation of the standard "descriptor()" accessor, which can
+ // conflict with a field of the same name. This is meant to make migration
+ // from proto1 easier; new code should avoid fields named "descriptor".
+ NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+ // Is this message deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the message, or it will be completely ignored; in the very least,
+ // this is a formalization for deprecating messages.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // Whether the message is an automatically generated map entry type for the
+ // maps field.
+ //
+ // For maps fields:
+ // map<KeyType, ValueType> map_field = 1;
+ // The parsed descriptor looks like:
+ // message MapFieldEntry {
+ // option map_entry = true;
+ // optional KeyType key = 1;
+ // optional ValueType value = 2;
+ // }
+ // repeated MapFieldEntry map_field = 1;
+ //
+ // Implementations may choose not to generate the map_entry=true message, but
+ // use a native map in the target language to hold the keys and values.
+ // The reflection APIs in such implementations still need to work as
+ // if the field is a repeated message field.
+ //
+ // NOTE: Do not set the option in .proto files. Always use the maps syntax
+ // instead. The option should only be implicitly set by the proto compiler
+ // parser.
+ MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageOptions) Reset() { *m = MessageOptions{} }
+func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage() {}
+func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MessageOptions
+}
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+ if m != nil && m.MessageSetWireFormat != nil {
+ return *m.MessageSetWireFormat
+ }
+ return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+ if m != nil && m.NoStandardDescriptorAccessor != nil {
+ return *m.NoStandardDescriptorAccessor
+ }
+ return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+ if m != nil && m.MapEntry != nil {
+ return *m.MapEntry
+ }
+ return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type FieldOptions struct {
+ // The ctype option instructs the C++ code generator to use a different
+ // representation of the field than it normally would. See the specific
+ // options below. This option is not yet implemented in the open source
+ // release -- sorry, we'll try to include it in a future version!
+ Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+ // The packed option can be enabled for repeated primitive fields to enable
+ // a more efficient representation on the wire. Rather than repeatedly
+ // writing the tag and type for each element, the entire array is encoded as
+ // a single length-delimited blob. In proto3, only explicitly setting it to
+ // false will avoid using packed encoding.
+ Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+ // The jstype option determines the JavaScript type used for values of the
+ // field. The option is permitted only for 64 bit integral and fixed types
+ // (int64, uint64, sint64, fixed64, sfixed64). By default these types are
+ // represented as JavaScript strings. This avoids loss of precision that can
+ // happen when a large value is converted to a floating point JavaScript
+ // number. Specifying JS_NUMBER for the jstype causes the generated
+ // JavaScript code to use the JavaScript "number" type instead of strings.
+ // This option is an enum to permit additional types to be added,
+ // e.g. goog.math.Integer.
+ Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+ // Should this field be parsed lazily? Lazy applies only to message-type
+ // fields. It means that when the outer message is initially parsed, the
+ // inner message's contents will not be parsed but instead stored in encoded
+ // form. The inner message will actually be parsed when it is first accessed.
+ //
+ // This is only a hint. Implementations are free to choose whether to use
+ // eager or lazy parsing regardless of the value of this option. However,
+ // setting this option true suggests that the protocol author believes that
+ // using lazy parsing on this field is worth the additional bookkeeping
+ // overhead typically needed to implement it.
+ //
+ // This option does not affect the public interface of any generated code;
+ // all method signatures remain the same. Furthermore, thread-safety of the
+ // interface is not affected by this option; const methods remain safe to
+ // call from multiple threads concurrently, while non-const methods continue
+ // to require exclusive access.
+ //
+ //
+ // Note that implementations may choose not to check required fields within
+ // a lazy sub-message. That is, calling IsInitialized() on the outer message
+ // may return true even if the inner message has missing required fields.
+ // This is necessary because otherwise the inner message would have to be
+ // parsed in order to perform the check, defeating the purpose of lazy
+ // parsing. An implementation which chooses not to check required fields
+ // must be consistent about it. That is, for any particular sub-message, the
+ // implementation must either *always* check its required fields, or *never*
+ // check its required fields, regardless of whether or not the message has
+ // been parsed.
+ Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+ // Is this field deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for accessors, or it will be completely ignored; in the very least, this
+ // is a formalization for deprecating fields.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // For Google-internal migration only. Do not use.
+ Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldOptions) Reset() { *m = FieldOptions{} }
+func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
+func (*FieldOptions) ProtoMessage() {}
+func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} }
+
+var extRange_FieldOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_FieldOptions
+}
+
+const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
+const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
+const Default_FieldOptions_Lazy bool = false
+const Default_FieldOptions_Deprecated bool = false
+const Default_FieldOptions_Weak bool = false
+
+func (m *FieldOptions) GetCtype() FieldOptions_CType {
+ if m != nil && m.Ctype != nil {
+ return *m.Ctype
+ }
+ return Default_FieldOptions_Ctype
+}
+
+func (m *FieldOptions) GetPacked() bool {
+ if m != nil && m.Packed != nil {
+ return *m.Packed
+ }
+ return false
+}
+
+func (m *FieldOptions) GetJstype() FieldOptions_JSType {
+ if m != nil && m.Jstype != nil {
+ return *m.Jstype
+ }
+ return Default_FieldOptions_Jstype
+}
+
+func (m *FieldOptions) GetLazy() bool {
+ if m != nil && m.Lazy != nil {
+ return *m.Lazy
+ }
+ return Default_FieldOptions_Lazy
+}
+
+func (m *FieldOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_FieldOptions_Deprecated
+}
+
+func (m *FieldOptions) GetWeak() bool {
+ if m != nil && m.Weak != nil {
+ return *m.Weak
+ }
+ return Default_FieldOptions_Weak
+}
+
+func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type OneofOptions struct {
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OneofOptions) Reset() { *m = OneofOptions{} }
+func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
+func (*OneofOptions) ProtoMessage() {}
+func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} }
+
+var extRange_OneofOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_OneofOptions
+}
+
+func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type EnumOptions struct {
+ // Set this option to true to allow mapping different tag names to the same
+ // value.
+ AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
+ // Is this enum deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum, or it will be completely ignored; in the very least, this
+ // is a formalization for deprecating enums.
+ Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumOptions) Reset() { *m = EnumOptions{} }
+func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumOptions) ProtoMessage() {}
+func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} }
+
+var extRange_EnumOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_EnumOptions
+}
+
+const Default_EnumOptions_Deprecated bool = false
+
+func (m *EnumOptions) GetAllowAlias() bool {
+ if m != nil && m.AllowAlias != nil {
+ return *m.AllowAlias
+ }
+ return false
+}
+
+func (m *EnumOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_EnumOptions_Deprecated
+}
+
+func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type EnumValueOptions struct {
+ // Is this enum value deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the enum value, or it will be completely ignored; in the very least,
+ // this is a formalization for deprecating enum values.
+ Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} }
+func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumValueOptions) ProtoMessage() {}
+func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} }
+
+var extRange_EnumValueOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_EnumValueOptions
+}
+
+const Default_EnumValueOptions_Deprecated bool = false
+
+func (m *EnumValueOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_EnumValueOptions_Deprecated
+}
+
+func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type ServiceOptions struct {
+ // Is this service deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the service, or it will be completely ignored; in the very least,
+ // this is a formalization for deprecating services.
+ Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ServiceOptions) Reset() { *m = ServiceOptions{} }
+func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
+func (*ServiceOptions) ProtoMessage() {}
+func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} }
+
+var extRange_ServiceOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_ServiceOptions
+}
+
+const Default_ServiceOptions_Deprecated bool = false
+
+func (m *ServiceOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_ServiceOptions_Deprecated
+}
+
+func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+type MethodOptions struct {
+ // Is this method deprecated?
+ // Depending on the target platform, this can emit Deprecated annotations
+ // for the method, or it will be completely ignored; in the very least,
+ // this is a formalization for deprecating methods.
+ Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+ // The parser stores options it doesn't recognize here. See above.
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+ proto.XXX_InternalExtensions `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MethodOptions) Reset() { *m = MethodOptions{} }
+func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage() {}
+func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} }
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+ {1000, 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MethodOptions
+}
+
+const Default_MethodOptions_Deprecated bool = false
+
+func (m *MethodOptions) GetDeprecated() bool {
+ if m != nil && m.Deprecated != nil {
+ return *m.Deprecated
+ }
+ return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+ if m != nil {
+ return m.UninterpretedOption
+ }
+ return nil
+}
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+ Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+ // The value of the uninterpreted option, in whatever type the tokenizer
+ // identified it as during parsing. Exactly one of these should be set.
+ IdentifierValue *string `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
+ PositiveIntValue *uint64 `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
+ NegativeIntValue *int64 `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
+ DoubleValue *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
+ StringValue []byte `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+ AggregateValue *string `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} }
+func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption) ProtoMessage() {}
+func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} }
+
+func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *UninterpretedOption) GetIdentifierValue() string {
+ if m != nil && m.IdentifierValue != nil {
+ return *m.IdentifierValue
+ }
+ return ""
+}
+
+func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
+ if m != nil && m.PositiveIntValue != nil {
+ return *m.PositiveIntValue
+ }
+ return 0
+}
+
+func (m *UninterpretedOption) GetNegativeIntValue() int64 {
+ if m != nil && m.NegativeIntValue != nil {
+ return *m.NegativeIntValue
+ }
+ return 0
+}
+
+func (m *UninterpretedOption) GetDoubleValue() float64 {
+ if m != nil && m.DoubleValue != nil {
+ return *m.DoubleValue
+ }
+ return 0
+}
+
+func (m *UninterpretedOption) GetStringValue() []byte {
+ if m != nil {
+ return m.StringValue
+ }
+ return nil
+}
+
+func (m *UninterpretedOption) GetAggregateValue() string {
+ if m != nil && m.AggregateValue != nil {
+ return *m.AggregateValue
+ }
+ return ""
+}
+
+// The name of the uninterpreted option. Each string represents a segment in
+// a dot-separated name. is_extension is true iff a segment represents an
+// extension (denoted with parentheses in options specs in .proto files).
+// E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents
+// "foo.(bar.baz).qux".
+type UninterpretedOption_NamePart struct {
+ NamePart *string `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+ IsExtension *bool `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOption_NamePart{} }
+func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption_NamePart) ProtoMessage() {}
+func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{17, 0}
+}
+
+func (m *UninterpretedOption_NamePart) GetNamePart() string {
+ if m != nil && m.NamePart != nil {
+ return *m.NamePart
+ }
+ return ""
+}
+
+func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
+ if m != nil && m.IsExtension != nil {
+ return *m.IsExtension
+ }
+ return false
+}
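+
+// exampleOptionName is an illustrative sketch and not part of the generated
+// output: the option name "foo.(bar.baz).qux" described above is encoded as
+// three NamePart segments, with IsExtension marking the parenthesized
+// extension segment. The variable name is hypothetical.
+var exampleOptionName = []*UninterpretedOption_NamePart{
+ {NamePart: proto.String("foo"), IsExtension: proto.Bool(false)},
+ {NamePart: proto.String("bar.baz"), IsExtension: proto.Bool(true)},
+ {NamePart: proto.String("qux"), IsExtension: proto.Bool(false)},
+}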
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+type SourceCodeInfo struct {
+ // A Location identifies a piece of source code in a .proto file which
+ // corresponds to a particular definition. This information is intended
+ // to be useful to IDEs, code indexers, documentation generators, and similar
+ // tools.
+ //
+ // For example, say we have a file like:
+ // message Foo {
+ // optional string foo = 1;
+ // }
+ // Let's look at just the field definition:
+ // optional string foo = 1;
+ // ^ ^^ ^^ ^ ^^^
+ // a bc de f ghi
+ // We have the following locations:
+ // span path represents
+ // [a,i) [ 4, 0, 2, 0 ] The whole field definition.
+ // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional).
+ // [c,d) [ 4, 0, 2, 0, 5 ] The type (string).
+ // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo).
+ // [g,h) [ 4, 0, 2, 0, 3 ] The number (1).
+ //
+ // Notes:
+ // - A location may refer to a repeated field itself (i.e. not to any
+ // particular index within it). This is used whenever a set of elements are
+ // logically enclosed in a single code segment. For example, an entire
+ // extend block (possibly containing multiple extension definitions) will
+ // have an outer location whose path refers to the "extensions" repeated
+ // field without an index.
+ // - Multiple locations may have the same path. This happens when a single
+ // logical declaration is spread out across multiple places. The most
+ // obvious example is the "extend" block again -- there may be multiple
+ // extend blocks in the same scope, each of which will have the same path.
+ // - A location's span is not always a subset of its parent's span. For
+ // example, the "extendee" of an extension declaration appears at the
+ // beginning of the "extend" block and is shared by all extensions within
+ // the block.
+ // - Just because a location's span is a subset of some other location's span
+ // does not mean that it is a descendant. For example, a "group" defines
+ // both a type and a field in a single declaration. Thus, the locations
+ // corresponding to the type and field and their components will overlap.
+ // - Code which tries to interpret locations should probably be designed to
+ // ignore those that it doesn't understand, as more types of locations could
+ // be recorded in the future.
+ Location []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage() {}
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} }
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+ if m != nil {
+ return m.Location
+ }
+ return nil
+}
+
+type SourceCodeInfo_Location struct {
+ // Identifies which part of the FileDescriptorProto was defined at this
+ // location.
+ //
+ // Each element is a field number or an index. They form a path from
+ // the root FileDescriptorProto to the place where the definition appears. For
+ // example, this path:
+ // [ 4, 3, 2, 7, 1 ]
+ // refers to:
+ // file.message_type(3)  // 4, 3
+ //     .field(7)         // 2, 7
+ //     .name()           // 1
+ // This is because FileDescriptorProto.message_type has field number 4:
+ // repeated DescriptorProto message_type = 4;
+ // and DescriptorProto.field has field number 2:
+ // repeated FieldDescriptorProto field = 2;
+ // and FieldDescriptorProto.name has field number 1:
+ // optional string name = 1;
+ //
+ // Thus, the above path gives the location of a field name. If we removed
+ // the last element:
+ // [ 4, 3, 2, 7 ]
+ // this path refers to the whole field declaration (from the beginning
+ // of the label to the terminating semicolon).
+ Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+ // Always has exactly three or four elements: start line, start column,
+ // end line (optional, otherwise assumed same as start line), end column.
+ // These are packed into a single field for efficiency. Note that line
+ // and column numbers are zero-based -- typically you will want to add
+ // 1 to each before displaying to a user.
+ Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
+ // If this SourceCodeInfo represents a complete declaration, these are any
+ // comments appearing before and after the declaration which appear to be
+ // attached to the declaration.
+ //
+ // A series of line comments appearing on consecutive lines, with no other
+ // tokens appearing on those lines, will be treated as a single comment.
+ //
+ // leading_detached_comments will keep paragraphs of comments that appear
+ // before (but not connected to) the current element. Each paragraph,
+ // separated by empty lines, will be one comment element in the repeated
+ // field.
+ //
+ // Only the comment content is provided; comment markers (e.g. //) are
+ // stripped out. For block comments, leading whitespace and an asterisk
+ // will be stripped from the beginning of each line other than the first.
+ // Newlines are included in the output.
+ //
+ // Examples:
+ //
+ // optional int32 foo = 1; // Comment attached to foo.
+ // // Comment attached to bar.
+ // optional int32 bar = 2;
+ //
+ // optional string baz = 3;
+ // // Comment attached to baz.
+ // // Another line attached to baz.
+ //
+ // // Comment attached to qux.
+ // //
+ // // Another line attached to qux.
+ // optional double qux = 4;
+ //
+ // // Detached comment for corge. This is not leading or trailing comments
+ // // to qux or corge because there are blank lines separating it from
+ // // both.
+ //
+ // // Detached comment for corge paragraph 2.
+ //
+ // optional string corge = 5;
+ // /* Block comment attached
+ // * to corge. Leading asterisks
+ // * will be removed. */
+ // /* Block comment attached to
+ // * grault. */
+ // optional int32 grault = 6;
+ //
+ // // ignored detached comments.
+ LeadingComments *string `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
+ TrailingComments *string `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
+ LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} }
+func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo_Location) ProtoMessage() {}
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18, 0} }
+
+func (m *SourceCodeInfo_Location) GetPath() []int32 {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+func (m *SourceCodeInfo_Location) GetSpan() []int32 {
+ if m != nil {
+ return m.Span
+ }
+ return nil
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingComments() string {
+ if m != nil && m.LeadingComments != nil {
+ return *m.LeadingComments
+ }
+ return ""
+}
+
+func (m *SourceCodeInfo_Location) GetTrailingComments() string {
+ if m != nil && m.TrailingComments != nil {
+ return *m.TrailingComments
+ }
+ return ""
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
+ if m != nil {
+ return m.LeadingDetachedComments
+ }
+ return nil
+}
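For orientation, a small sketch (not part of the generated code, same assumed import path as above) of reading Path and Span the way the comments describe: the span carries three or four zero-based numbers, and when only three are present the end line defaults to the start line.

package main

import (
	"fmt"

	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

// spanString renders a Location span as a 1-based "line:col-line:col" range,
// expanding the three-element form where the end line equals the start line.
func spanString(loc *descriptor.SourceCodeInfo_Location) string {
	s := loc.GetSpan()
	switch len(s) {
	case 3:
		return fmt.Sprintf("%d:%d-%d:%d", s[0]+1, s[1]+1, s[0]+1, s[2]+1)
	case 4:
		return fmt.Sprintf("%d:%d-%d:%d", s[0]+1, s[1]+1, s[2]+1, s[3]+1)
	default:
		return "invalid span"
	}
}

func main() {
	loc := &descriptor.SourceCodeInfo_Location{
		// file.message_type(3).field(7).name(), as in the path example above.
		Path: []int32{4, 3, 2, 7, 1},
		// Zero-based start line, start column, end column (end line omitted).
		Span: []int32{41, 11, 14},
	}
	fmt.Println(loc.GetPath(), spanString(loc)) // [4 3 2 7 1] 42:12-42:15
}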
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+type GeneratedCodeInfo struct {
+ // An Annotation connects some span of text in generated code to an element
+ // of its generating .proto file.
+ Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} }
+func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo) ProtoMessage() {}
+func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} }
+
+func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
+ if m != nil {
+ return m.Annotation
+ }
+ return nil
+}
+
+type GeneratedCodeInfo_Annotation struct {
+ // Identifies the element in the original source .proto file. This field
+ // is formatted the same as SourceCodeInfo.Location.path.
+ Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+ // Identifies the filesystem path to the original source .proto.
+ SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
+ // Identifies the starting offset in bytes in the generated code
+ // that relates to the identified object.
+ Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
+ // Identifies the ending offset in bytes in the generated code that
+ // relates to the identified object. The end offset should be one past
+ // the last relevant byte (so the length of the text = end - begin).
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} }
+func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo_Annotation) ProtoMessage() {}
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
+ return fileDescriptor0, []int{19, 0}
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string {
+ if m != nil && m.SourceFile != nil {
+ return *m.SourceFile
+ }
+ return ""
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 {
+ if m != nil && m.Begin != nil {
+ return *m.Begin
+ }
+ return 0
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
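A brief sketch (illustrative only, same assumed import path) of how an Annotation's byte offsets map back into the generated file's text; End is one past the last relevant byte, so the annotated text has length End-Begin.

package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	descriptor "github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	generated := []byte("type Foo struct{}\n")

	ann := &descriptor.GeneratedCodeInfo_Annotation{
		Path:       []int32{4, 0},             // hypothetical: first message in the source file
		SourceFile: proto.String("foo.proto"), // hypothetical source file name
		Begin:      proto.Int32(5),            // byte offset where "Foo" starts
		End:        proto.Int32(8),            // one past the last byte of "Foo"
	}

	// Slice the generated text with the annotation's offsets.
	fmt.Printf("%s came from %s\n", generated[ann.GetBegin():ann.GetEnd()], ann.GetSourceFile())
	// Output: Foo came from foo.proto
}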
+
+func init() {
+ proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
+ proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
+ proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
+ proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
+ proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
+ proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
+ proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
+ proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
+ proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
+ proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
+ proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
+ proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
+ proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
+ proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
+ proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions")
+ proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
+ proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
+ proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
+ proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
+ proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
+ proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
+ proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
+ proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
+ proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
+ proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
+ proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
+ proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
+ proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
+ proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
+ proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
+}
+
+func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) }
+
+var fileDescriptor0 = []byte{
+ // 2295 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0x4f, 0x6f, 0x1b, 0xc7,
+ 0x15, 0xcf, 0xf2, 0x9f, 0xc8, 0x47, 0x8a, 0x1a, 0x8d, 0x14, 0x67, 0xad, 0xfc, 0xb1, 0xcc, 0xd8,
+ 0xb1, 0x6c, 0xb7, 0x74, 0x20, 0xff, 0x89, 0xa3, 0x14, 0x29, 0x28, 0x71, 0xad, 0xd0, 0x90, 0x44,
+ 0x76, 0x29, 0xb5, 0x4e, 0x2e, 0x8b, 0xd1, 0xee, 0x90, 0x5a, 0x7b, 0x39, 0xbb, 0xdd, 0x5d, 0xda,
+ 0x56, 0x4e, 0x06, 0x7a, 0xea, 0xa5, 0xe7, 0xa2, 0x2d, 0x7a, 0xc8, 0x25, 0x40, 0x3f, 0x40, 0x0f,
+ 0xfd, 0x0a, 0x05, 0x0a, 0xf4, 0x2b, 0x14, 0x05, 0xda, 0x6f, 0xd0, 0x6b, 0x31, 0x33, 0xbb, 0xcb,
+ 0x5d, 0xfe, 0x89, 0xd5, 0x00, 0x49, 0x7a, 0x12, 0xe7, 0xf7, 0x7e, 0xef, 0xcd, 0x9b, 0x37, 0x6f,
+ 0xde, 0xbc, 0x1d, 0xc1, 0xe6, 0xd0, 0x75, 0x87, 0x0e, 0xbd, 0xe3, 0xf9, 0x6e, 0xe8, 0x9e, 0x8e,
+ 0x07, 0x77, 0x2c, 0x1a, 0x98, 0xbe, 0xed, 0x85, 0xae, 0xdf, 0x14, 0x18, 0x5e, 0x91, 0x8c, 0x66,
+ 0xcc, 0x68, 0x1c, 0xc2, 0xea, 0x23, 0xdb, 0xa1, 0xed, 0x84, 0xd8, 0xa7, 0x21, 0x7e, 0x08, 0x85,
+ 0x81, 0xed, 0x50, 0x55, 0xd9, 0xcc, 0x6f, 0x55, 0xb7, 0xaf, 0x35, 0xa7, 0x94, 0x9a, 0x59, 0x8d,
+ 0x1e, 0x87, 0x75, 0xa1, 0xd1, 0xf8, 0x67, 0x01, 0xd6, 0xe6, 0x48, 0x31, 0x86, 0x02, 0x23, 0x23,
+ 0x6e, 0x51, 0xd9, 0xaa, 0xe8, 0xe2, 0x37, 0x56, 0x61, 0xc9, 0x23, 0xe6, 0x33, 0x32, 0xa4, 0x6a,
+ 0x4e, 0xc0, 0xf1, 0x10, 0xbf, 0x07, 0x60, 0x51, 0x8f, 0x32, 0x8b, 0x32, 0xf3, 0x5c, 0xcd, 0x6f,
+ 0xe6, 0xb7, 0x2a, 0x7a, 0x0a, 0xc1, 0xb7, 0x61, 0xd5, 0x1b, 0x9f, 0x3a, 0xb6, 0x69, 0xa4, 0x68,
+ 0xb0, 0x99, 0xdf, 0x2a, 0xea, 0x48, 0x0a, 0xda, 0x13, 0xf2, 0x0d, 0x58, 0x79, 0x41, 0xc9, 0xb3,
+ 0x34, 0xb5, 0x2a, 0xa8, 0x75, 0x0e, 0xa7, 0x88, 0x7b, 0x50, 0x1b, 0xd1, 0x20, 0x20, 0x43, 0x6a,
+ 0x84, 0xe7, 0x1e, 0x55, 0x0b, 0x62, 0xf5, 0x9b, 0x33, 0xab, 0x9f, 0x5e, 0x79, 0x35, 0xd2, 0x3a,
+ 0x3e, 0xf7, 0x28, 0x6e, 0x41, 0x85, 0xb2, 0xf1, 0x48, 0x5a, 0x28, 0x2e, 0x88, 0x9f, 0xc6, 0xc6,
+ 0xa3, 0x69, 0x2b, 0x65, 0xae, 0x16, 0x99, 0x58, 0x0a, 0xa8, 0xff, 0xdc, 0x36, 0xa9, 0x5a, 0x12,
+ 0x06, 0x6e, 0xcc, 0x18, 0xe8, 0x4b, 0xf9, 0xb4, 0x8d, 0x58, 0x0f, 0xef, 0x41, 0x85, 0xbe, 0x0c,
+ 0x29, 0x0b, 0x6c, 0x97, 0xa9, 0x4b, 0xc2, 0xc8, 0xf5, 0x39, 0xbb, 0x48, 0x1d, 0x6b, 0xda, 0xc4,
+ 0x44, 0x0f, 0x3f, 0x80, 0x25, 0xd7, 0x0b, 0x6d, 0x97, 0x05, 0x6a, 0x79, 0x53, 0xd9, 0xaa, 0x6e,
+ 0xbf, 0x33, 0x37, 0x11, 0xba, 0x92, 0xa3, 0xc7, 0x64, 0xdc, 0x01, 0x14, 0xb8, 0x63, 0xdf, 0xa4,
+ 0x86, 0xe9, 0x5a, 0xd4, 0xb0, 0xd9, 0xc0, 0x55, 0x2b, 0xc2, 0xc0, 0x95, 0xd9, 0x85, 0x08, 0xe2,
+ 0x9e, 0x6b, 0xd1, 0x0e, 0x1b, 0xb8, 0x7a, 0x3d, 0xc8, 0x8c, 0xf1, 0x25, 0x28, 0x05, 0xe7, 0x2c,
+ 0x24, 0x2f, 0xd5, 0x9a, 0xc8, 0x90, 0x68, 0xd4, 0xf8, 0x4f, 0x11, 0x56, 0x2e, 0x92, 0x62, 0x9f,
+ 0x40, 0x71, 0xc0, 0x57, 0xa9, 0xe6, 0xfe, 0x97, 0x18, 0x48, 0x9d, 0x6c, 0x10, 0x4b, 0xdf, 0x32,
+ 0x88, 0x2d, 0xa8, 0x32, 0x1a, 0x84, 0xd4, 0x92, 0x19, 0x91, 0xbf, 0x60, 0x4e, 0x81, 0x54, 0x9a,
+ 0x4d, 0xa9, 0xc2, 0xb7, 0x4a, 0xa9, 0x27, 0xb0, 0x92, 0xb8, 0x64, 0xf8, 0x84, 0x0d, 0xe3, 0xdc,
+ 0xbc, 0xf3, 0x3a, 0x4f, 0x9a, 0x5a, 0xac, 0xa7, 0x73, 0x35, 0xbd, 0x4e, 0x33, 0x63, 0xdc, 0x06,
+ 0x70, 0x19, 0x75, 0x07, 0x86, 0x45, 0x4d, 0x47, 0x2d, 0x2f, 0x88, 0x52, 0x97, 0x53, 0x66, 0xa2,
+ 0xe4, 0x4a, 0xd4, 0x74, 0xf0, 0xc7, 0x93, 0x54, 0x5b, 0x5a, 0x90, 0x29, 0x87, 0xf2, 0x90, 0xcd,
+ 0x64, 0xdb, 0x09, 0xd4, 0x7d, 0xca, 0xf3, 0x9e, 0x5a, 0xd1, 0xca, 0x2a, 0xc2, 0x89, 0xe6, 0x6b,
+ 0x57, 0xa6, 0x47, 0x6a, 0x72, 0x61, 0xcb, 0x7e, 0x7a, 0x88, 0xdf, 0x87, 0x04, 0x30, 0x44, 0x5a,
+ 0x81, 0xa8, 0x42, 0xb5, 0x18, 0x3c, 0x22, 0x23, 0xba, 0xf1, 0x10, 0xea, 0xd9, 0xf0, 0xe0, 0x75,
+ 0x28, 0x06, 0x21, 0xf1, 0x43, 0x91, 0x85, 0x45, 0x5d, 0x0e, 0x30, 0x82, 0x3c, 0x65, 0x96, 0xa8,
+ 0x72, 0x45, 0x9d, 0xff, 0xdc, 0xf8, 0x08, 0x96, 0x33, 0xd3, 0x5f, 0x54, 0xb1, 0xf1, 0xdb, 0x12,
+ 0xac, 0xcf, 0xcb, 0xb9, 0xb9, 0xe9, 0x7f, 0x09, 0x4a, 0x6c, 0x3c, 0x3a, 0xa5, 0xbe, 0x9a, 0x17,
+ 0x16, 0xa2, 0x11, 0x6e, 0x41, 0xd1, 0x21, 0xa7, 0xd4, 0x51, 0x0b, 0x9b, 0xca, 0x56, 0x7d, 0xfb,
+ 0xf6, 0x85, 0xb2, 0xba, 0x79, 0xc0, 0x55, 0x74, 0xa9, 0x89, 0x3f, 0x85, 0x42, 0x54, 0xe2, 0xb8,
+ 0x85, 0x5b, 0x17, 0xb3, 0xc0, 0x73, 0x51, 0x17, 0x7a, 0xf8, 0x6d, 0xa8, 0xf0, 0xbf, 0x32, 0xb6,
+ 0x25, 0xe1, 0x73, 0x99, 0x03, 0x3c, 0xae, 0x78, 0x03, 0xca, 0x22, 0xcd, 0x2c, 0x1a, 0x5f, 0x0d,
+ 0xc9, 0x98, 0x6f, 0x8c, 0x45, 0x07, 0x64, 0xec, 0x84, 0xc6, 0x73, 0xe2, 0x8c, 0xa9, 0x48, 0x98,
+ 0x8a, 0x5e, 0x8b, 0xc0, 0x9f, 0x73, 0x0c, 0x5f, 0x81, 0xaa, 0xcc, 0x4a, 0x9b, 0x59, 0xf4, 0xa5,
+ 0xa8, 0x3e, 0x45, 0x5d, 0x26, 0x6a, 0x87, 0x23, 0x7c, 0xfa, 0xa7, 0x81, 0xcb, 0xe2, 0xad, 0x15,
+ 0x53, 0x70, 0x40, 0x4c, 0xff, 0xd1, 0x74, 0xe1, 0x7b, 0x77, 0xfe, 0xf2, 0xa6, 0x73, 0xb1, 0xf1,
+ 0xe7, 0x1c, 0x14, 0xc4, 0x79, 0x5b, 0x81, 0xea, 0xf1, 0xe7, 0x3d, 0xcd, 0x68, 0x77, 0x4f, 0x76,
+ 0x0f, 0x34, 0xa4, 0xe0, 0x3a, 0x80, 0x00, 0x1e, 0x1d, 0x74, 0x5b, 0xc7, 0x28, 0x97, 0x8c, 0x3b,
+ 0x47, 0xc7, 0x0f, 0xee, 0xa1, 0x7c, 0xa2, 0x70, 0x22, 0x81, 0x42, 0x9a, 0x70, 0x77, 0x1b, 0x15,
+ 0x31, 0x82, 0x9a, 0x34, 0xd0, 0x79, 0xa2, 0xb5, 0x1f, 0xdc, 0x43, 0xa5, 0x2c, 0x72, 0x77, 0x1b,
+ 0x2d, 0xe1, 0x65, 0xa8, 0x08, 0x64, 0xb7, 0xdb, 0x3d, 0x40, 0xe5, 0xc4, 0x66, 0xff, 0x58, 0xef,
+ 0x1c, 0xed, 0xa3, 0x4a, 0x62, 0x73, 0x5f, 0xef, 0x9e, 0xf4, 0x10, 0x24, 0x16, 0x0e, 0xb5, 0x7e,
+ 0xbf, 0xb5, 0xaf, 0xa1, 0x6a, 0xc2, 0xd8, 0xfd, 0xfc, 0x58, 0xeb, 0xa3, 0x5a, 0xc6, 0xad, 0xbb,
+ 0xdb, 0x68, 0x39, 0x99, 0x42, 0x3b, 0x3a, 0x39, 0x44, 0x75, 0xbc, 0x0a, 0xcb, 0x72, 0x8a, 0xd8,
+ 0x89, 0x95, 0x29, 0xe8, 0xc1, 0x3d, 0x84, 0x26, 0x8e, 0x48, 0x2b, 0xab, 0x19, 0xe0, 0xc1, 0x3d,
+ 0x84, 0x1b, 0x7b, 0x50, 0x14, 0xd9, 0x85, 0x31, 0xd4, 0x0f, 0x5a, 0xbb, 0xda, 0x81, 0xd1, 0xed,
+ 0x1d, 0x77, 0xba, 0x47, 0xad, 0x03, 0xa4, 0x4c, 0x30, 0x5d, 0xfb, 0xd9, 0x49, 0x47, 0xd7, 0xda,
+ 0x28, 0x97, 0xc6, 0x7a, 0x5a, 0xeb, 0x58, 0x6b, 0xa3, 0x7c, 0xc3, 0x84, 0xf5, 0x79, 0x75, 0x66,
+ 0xee, 0xc9, 0x48, 0x6d, 0x71, 0x6e, 0xc1, 0x16, 0x0b, 0x5b, 0x33, 0x5b, 0xfc, 0x95, 0x02, 0x6b,
+ 0x73, 0x6a, 0xed, 0xdc, 0x49, 0x7e, 0x0a, 0x45, 0x99, 0xa2, 0xf2, 0xf6, 0xb9, 0x39, 0xb7, 0x68,
+ 0x8b, 0x84, 0x9d, 0xb9, 0x81, 0x84, 0x5e, 0xfa, 0x06, 0xce, 0x2f, 0xb8, 0x81, 0xb9, 0x89, 0x19,
+ 0x27, 0x7f, 0xa5, 0x80, 0xba, 0xc8, 0xf6, 0x6b, 0x0a, 0x45, 0x2e, 0x53, 0x28, 0x3e, 0x99, 0x76,
+ 0xe0, 0xea, 0xe2, 0x35, 0xcc, 0x78, 0xf1, 0xb5, 0x02, 0x97, 0xe6, 0x37, 0x2a, 0x73, 0x7d, 0xf8,
+ 0x14, 0x4a, 0x23, 0x1a, 0x9e, 0xb9, 0xf1, 0x65, 0xfd, 0xc1, 0x9c, 0x2b, 0x80, 0x8b, 0xa7, 0x63,
+ 0x15, 0x69, 0xa5, 0xef, 0x90, 0xfc, 0xa2, 0x6e, 0x43, 0x7a, 0x33, 0xe3, 0xe9, 0xaf, 0x73, 0xf0,
+ 0xe6, 0x5c, 0xe3, 0x73, 0x1d, 0x7d, 0x17, 0xc0, 0x66, 0xde, 0x38, 0x94, 0x17, 0xb2, 0xac, 0x4f,
+ 0x15, 0x81, 0x88, 0xb3, 0xcf, 0x6b, 0xcf, 0x38, 0x4c, 0xe4, 0x79, 0x21, 0x07, 0x09, 0x09, 0xc2,
+ 0xc3, 0x89, 0xa3, 0x05, 0xe1, 0xe8, 0x7b, 0x0b, 0x56, 0x3a, 0x73, 0xd7, 0x7d, 0x08, 0xc8, 0x74,
+ 0x6c, 0xca, 0x42, 0x23, 0x08, 0x7d, 0x4a, 0x46, 0x36, 0x1b, 0x8a, 0x02, 0x5c, 0xde, 0x29, 0x0e,
+ 0x88, 0x13, 0x50, 0x7d, 0x45, 0x8a, 0xfb, 0xb1, 0x94, 0x6b, 0x88, 0x5b, 0xc6, 0x4f, 0x69, 0x94,
+ 0x32, 0x1a, 0x52, 0x9c, 0x68, 0x34, 0x7e, 0xb3, 0x04, 0xd5, 0x54, 0x5b, 0x87, 0xaf, 0x42, 0xed,
+ 0x29, 0x79, 0x4e, 0x8c, 0xb8, 0x55, 0x97, 0x91, 0xa8, 0x72, 0xac, 0x17, 0xb5, 0xeb, 0x1f, 0xc2,
+ 0xba, 0xa0, 0xb8, 0xe3, 0x90, 0xfa, 0x86, 0xe9, 0x90, 0x20, 0x10, 0x41, 0x2b, 0x0b, 0x2a, 0xe6,
+ 0xb2, 0x2e, 0x17, 0xed, 0xc5, 0x12, 0x7c, 0x1f, 0xd6, 0x84, 0xc6, 0x68, 0xec, 0x84, 0xb6, 0xe7,
+ 0x50, 0x83, 0x7f, 0x3c, 0x04, 0xa2, 0x10, 0x27, 0x9e, 0xad, 0x72, 0xc6, 0x61, 0x44, 0xe0, 0x1e,
+ 0x05, 0xb8, 0x0d, 0xef, 0x0a, 0xb5, 0x21, 0x65, 0xd4, 0x27, 0x21, 0x35, 0xe8, 0x2f, 0xc7, 0xc4,
+ 0x09, 0x0c, 0xc2, 0x2c, 0xe3, 0x8c, 0x04, 0x67, 0xea, 0x3a, 0x37, 0xb0, 0x9b, 0x53, 0x15, 0xfd,
+ 0x32, 0x27, 0xee, 0x47, 0x3c, 0x4d, 0xd0, 0x5a, 0xcc, 0xfa, 0x8c, 0x04, 0x67, 0x78, 0x07, 0x2e,
+ 0x09, 0x2b, 0x41, 0xe8, 0xdb, 0x6c, 0x68, 0x98, 0x67, 0xd4, 0x7c, 0x66, 0x8c, 0xc3, 0xc1, 0x43,
+ 0xf5, 0xed, 0xf4, 0xfc, 0xc2, 0xc3, 0xbe, 0xe0, 0xec, 0x71, 0xca, 0x49, 0x38, 0x78, 0x88, 0xfb,
+ 0x50, 0xe3, 0x9b, 0x31, 0xb2, 0xbf, 0xa4, 0xc6, 0xc0, 0xf5, 0xc5, 0xcd, 0x52, 0x9f, 0x73, 0xb2,
+ 0x53, 0x11, 0x6c, 0x76, 0x23, 0x85, 0x43, 0xd7, 0xa2, 0x3b, 0xc5, 0x7e, 0x4f, 0xd3, 0xda, 0x7a,
+ 0x35, 0xb6, 0xf2, 0xc8, 0xf5, 0x79, 0x42, 0x0d, 0xdd, 0x24, 0xc0, 0x55, 0x99, 0x50, 0x43, 0x37,
+ 0x0e, 0xef, 0x7d, 0x58, 0x33, 0x4d, 0xb9, 0x66, 0xdb, 0x34, 0xa2, 0x16, 0x3f, 0x50, 0x51, 0x26,
+ 0x58, 0xa6, 0xb9, 0x2f, 0x09, 0x51, 0x8e, 0x07, 0xf8, 0x63, 0x78, 0x73, 0x12, 0xac, 0xb4, 0xe2,
+ 0xea, 0xcc, 0x2a, 0xa7, 0x55, 0xef, 0xc3, 0x9a, 0x77, 0x3e, 0xab, 0x88, 0x33, 0x33, 0x7a, 0xe7,
+ 0xd3, 0x6a, 0xd7, 0xc5, 0x67, 0x9b, 0x4f, 0x4d, 0x12, 0x52, 0x4b, 0x7d, 0x2b, 0xcd, 0x4e, 0x09,
+ 0xf0, 0x1d, 0x40, 0xa6, 0x69, 0x50, 0x46, 0x4e, 0x1d, 0x6a, 0x10, 0x9f, 0x32, 0x12, 0xa8, 0x57,
+ 0xd2, 0xe4, 0xba, 0x69, 0x6a, 0x42, 0xda, 0x12, 0x42, 0x7c, 0x0b, 0x56, 0xdd, 0xd3, 0xa7, 0xa6,
+ 0xcc, 0x2c, 0xc3, 0xf3, 0xe9, 0xc0, 0x7e, 0xa9, 0x5e, 0x13, 0x61, 0x5a, 0xe1, 0x02, 0x91, 0x57,
+ 0x3d, 0x01, 0xe3, 0x9b, 0x80, 0xcc, 0xe0, 0x8c, 0xf8, 0x9e, 0xb8, 0xda, 0x03, 0x8f, 0x98, 0x54,
+ 0xbd, 0x2e, 0xa9, 0x12, 0x3f, 0x8a, 0x61, 0xfc, 0x04, 0xd6, 0xc7, 0xcc, 0x66, 0x21, 0xf5, 0x3d,
+ 0x9f, 0xf2, 0x0e, 0x5d, 0x1e, 0x33, 0xf5, 0x5f, 0x4b, 0x0b, 0x7a, 0xec, 0x93, 0x34, 0x5b, 0xee,
+ 0xae, 0xbe, 0x36, 0x9e, 0x05, 0x1b, 0x3b, 0x50, 0x4b, 0x6f, 0x3a, 0xae, 0x80, 0xdc, 0x76, 0xa4,
+ 0xf0, 0x0b, 0x74, 0xaf, 0xdb, 0xe6, 0x57, 0xdf, 0x17, 0x1a, 0xca, 0xf1, 0x2b, 0xf8, 0xa0, 0x73,
+ 0xac, 0x19, 0xfa, 0xc9, 0xd1, 0x71, 0xe7, 0x50, 0x43, 0xf9, 0x5b, 0x95, 0xf2, 0xbf, 0x97, 0xd0,
+ 0xab, 0x57, 0xaf, 0x5e, 0xe5, 0x1e, 0x17, 0xca, 0x1f, 0xa0, 0x1b, 0x8d, 0xbf, 0xe6, 0xa0, 0x9e,
+ 0x6d, 0x7e, 0xf1, 0x4f, 0xe0, 0xad, 0xf8, 0x4b, 0x35, 0xa0, 0xa1, 0xf1, 0xc2, 0xf6, 0x45, 0x36,
+ 0x8e, 0x88, 0x6c, 0x1f, 0x93, 0x40, 0xae, 0x47, 0xac, 0x3e, 0x0d, 0x7f, 0x61, 0xfb, 0x3c, 0xd7,
+ 0x46, 0x24, 0xc4, 0x07, 0x70, 0x85, 0xb9, 0x46, 0x10, 0x12, 0x66, 0x11, 0xdf, 0x32, 0x26, 0x6f,
+ 0x04, 0x06, 0x31, 0x4d, 0x1a, 0x04, 0xae, 0xbc, 0x05, 0x12, 0x2b, 0xef, 0x30, 0xb7, 0x1f, 0x91,
+ 0x27, 0xe5, 0xb1, 0x15, 0x51, 0xa7, 0x36, 0x3d, 0xbf, 0x68, 0xd3, 0xdf, 0x86, 0xca, 0x88, 0x78,
+ 0x06, 0x65, 0xa1, 0x7f, 0x2e, 0x5a, 0xb6, 0xb2, 0x5e, 0x1e, 0x11, 0x4f, 0xe3, 0xe3, 0xef, 0x6e,
+ 0x27, 0xb2, 0xd1, 0x2c, 0xa3, 0x4a, 0xe3, 0x1f, 0x79, 0xa8, 0xa5, 0x9b, 0x37, 0xde, 0x0b, 0x9b,
+ 0xa2, 0x50, 0x2b, 0xe2, 0x28, 0xbf, 0xff, 0x8d, 0xad, 0x5e, 0x73, 0x8f, 0x57, 0xf0, 0x9d, 0x92,
+ 0x6c, 0xa9, 0x74, 0xa9, 0xc9, 0x6f, 0x4f, 0x7e, 0x78, 0xa9, 0x6c, 0xd4, 0xcb, 0x7a, 0x34, 0xc2,
+ 0xfb, 0x50, 0x7a, 0x1a, 0x08, 0xdb, 0x25, 0x61, 0xfb, 0xda, 0x37, 0xdb, 0x7e, 0xdc, 0x17, 0xc6,
+ 0x2b, 0x8f, 0xfb, 0xc6, 0x51, 0x57, 0x3f, 0x6c, 0x1d, 0xe8, 0x91, 0x3a, 0xbe, 0x0c, 0x05, 0x87,
+ 0x7c, 0x79, 0x9e, 0xad, 0xf5, 0x02, 0xba, 0x68, 0xf8, 0x2f, 0x43, 0xe1, 0x05, 0x25, 0xcf, 0xb2,
+ 0x15, 0x56, 0x40, 0xdf, 0xe1, 0x31, 0xb8, 0x03, 0x45, 0x11, 0x2f, 0x0c, 0x10, 0x45, 0x0c, 0xbd,
+ 0x81, 0xcb, 0x50, 0xd8, 0xeb, 0xea, 0xfc, 0x28, 0x20, 0xa8, 0x49, 0xd4, 0xe8, 0x75, 0xb4, 0x3d,
+ 0x0d, 0xe5, 0x1a, 0xf7, 0xa1, 0x24, 0x83, 0xc0, 0x8f, 0x49, 0x12, 0x06, 0xf4, 0x46, 0x34, 0x8c,
+ 0x6c, 0x28, 0xb1, 0xf4, 0xe4, 0x70, 0x57, 0xd3, 0x51, 0x2e, 0xbb, 0xc9, 0x05, 0x54, 0x6c, 0x04,
+ 0x50, 0x4b, 0x77, 0x6f, 0xdf, 0x4b, 0x7e, 0x35, 0xfe, 0xa2, 0x40, 0x35, 0xd5, 0x8d, 0xf1, 0x3e,
+ 0x80, 0x38, 0x8e, 0xfb, 0xc2, 0x20, 0x8e, 0x4d, 0x82, 0x28, 0x35, 0x40, 0x40, 0x2d, 0x8e, 0x5c,
+ 0x74, 0xeb, 0xbe, 0x17, 0xe7, 0xff, 0xa8, 0x00, 0x9a, 0xee, 0xe4, 0xa6, 0x1c, 0x54, 0x7e, 0x50,
+ 0x07, 0xff, 0xa0, 0x40, 0x3d, 0xdb, 0xbe, 0x4d, 0xb9, 0x77, 0xf5, 0x07, 0x75, 0xef, 0xf7, 0x0a,
+ 0x2c, 0x67, 0x9a, 0xb6, 0xff, 0x2b, 0xef, 0x7e, 0x97, 0x87, 0xb5, 0x39, 0x7a, 0xb8, 0x15, 0x75,
+ 0xb7, 0xb2, 0xe1, 0xfe, 0xf1, 0x45, 0xe6, 0x6a, 0xf2, 0xfb, 0xb3, 0x47, 0xfc, 0x30, 0x6a, 0x86,
+ 0x6f, 0x02, 0xb2, 0x2d, 0xca, 0x42, 0x7b, 0x60, 0x53, 0x3f, 0xfa, 0x22, 0x97, 0x2d, 0xef, 0xca,
+ 0x04, 0x97, 0x1f, 0xe5, 0x3f, 0x02, 0xec, 0xb9, 0x81, 0x1d, 0xda, 0xcf, 0xa9, 0x61, 0xb3, 0xf8,
+ 0xf3, 0x9d, 0xb7, 0xc0, 0x05, 0x1d, 0xc5, 0x92, 0x0e, 0x0b, 0x13, 0x36, 0xa3, 0x43, 0x32, 0xc5,
+ 0xe6, 0x15, 0x30, 0xaf, 0xa3, 0x58, 0x92, 0xb0, 0xaf, 0x42, 0xcd, 0x72, 0xc7, 0xbc, 0xa1, 0x90,
+ 0x3c, 0x5e, 0x70, 0x15, 0xbd, 0x2a, 0xb1, 0x84, 0x12, 0x75, 0x7c, 0x93, 0x77, 0x83, 0x9a, 0x5e,
+ 0x95, 0x98, 0xa4, 0xdc, 0x80, 0x15, 0x32, 0x1c, 0xfa, 0xdc, 0x78, 0x6c, 0x48, 0xf6, 0xb0, 0xf5,
+ 0x04, 0x16, 0xc4, 0x8d, 0xc7, 0x50, 0x8e, 0xe3, 0xc0, 0x6f, 0x36, 0x1e, 0x09, 0xc3, 0x93, 0xaf,
+ 0x37, 0xb9, 0xad, 0x8a, 0x5e, 0x66, 0xb1, 0xf0, 0x2a, 0xd4, 0xec, 0xc0, 0x98, 0x3c, 0x23, 0xe6,
+ 0x36, 0x73, 0x5b, 0x65, 0xbd, 0x6a, 0x07, 0xc9, 0xbb, 0x51, 0xe3, 0xeb, 0x1c, 0xd4, 0xb3, 0xcf,
+ 0xa0, 0xb8, 0x0d, 0x65, 0xc7, 0x35, 0x89, 0x48, 0x04, 0xf9, 0x06, 0xbf, 0xf5, 0x9a, 0x97, 0xd3,
+ 0xe6, 0x41, 0xc4, 0xd7, 0x13, 0xcd, 0x8d, 0xbf, 0x29, 0x50, 0x8e, 0x61, 0x7c, 0x09, 0x0a, 0x1e,
+ 0x09, 0xcf, 0x84, 0xb9, 0xe2, 0x6e, 0x0e, 0x29, 0xba, 0x18, 0x73, 0x3c, 0xf0, 0x08, 0x13, 0x29,
+ 0x10, 0xe1, 0x7c, 0xcc, 0xf7, 0xd5, 0xa1, 0xc4, 0x12, 0x0d, 0xb2, 0x3b, 0x1a, 0x51, 0x16, 0x06,
+ 0xf1, 0xbe, 0x46, 0xf8, 0x5e, 0x04, 0xe3, 0xdb, 0xb0, 0x1a, 0xfa, 0xc4, 0x76, 0x32, 0xdc, 0x82,
+ 0xe0, 0xa2, 0x58, 0x90, 0x90, 0x77, 0xe0, 0x72, 0x6c, 0xd7, 0xa2, 0x21, 0x31, 0xcf, 0xa8, 0x35,
+ 0x51, 0x2a, 0x89, 0x37, 0xb6, 0xb7, 0x22, 0x42, 0x3b, 0x92, 0xc7, 0xba, 0x8d, 0xbf, 0x2b, 0xb0,
+ 0x1a, 0xb7, 0xf4, 0x56, 0x12, 0xac, 0x43, 0x00, 0xc2, 0x98, 0x1b, 0xa6, 0xc3, 0x35, 0x9b, 0xca,
+ 0x33, 0x7a, 0xcd, 0x56, 0xa2, 0xa4, 0xa7, 0x0c, 0x6c, 0x8c, 0x00, 0x26, 0x92, 0x85, 0x61, 0xbb,
+ 0x02, 0xd5, 0xe8, 0x8d, 0x5b, 0xfc, 0xa3, 0x44, 0x7e, 0x04, 0x82, 0x84, 0x78, 0xef, 0x8f, 0xd7,
+ 0xa1, 0x78, 0x4a, 0x87, 0x36, 0x8b, 0x5e, 0xde, 0xe4, 0x20, 0x7e, 0xcf, 0x2b, 0x24, 0xef, 0x79,
+ 0xbb, 0x4f, 0x60, 0xcd, 0x74, 0x47, 0xd3, 0xee, 0xee, 0xa2, 0xa9, 0x0f, 0xd1, 0xe0, 0x33, 0xe5,
+ 0x0b, 0x98, 0x74, 0x6a, 0x5f, 0xe5, 0xf2, 0xfb, 0xbd, 0xdd, 0x3f, 0xe5, 0x36, 0xf6, 0xa5, 0x5e,
+ 0x2f, 0x5e, 0xa6, 0x4e, 0x07, 0x0e, 0x35, 0xb9, 0xeb, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x5f,
+ 0x1c, 0x48, 0x4f, 0x0d, 0x1a, 0x00, 0x00,
+}
diff --git a/vendor/github.com/google/go-github/github/activity.go b/vendor/github.com/google/go-github/github/activity.go
index ad6da2b..d6c992c 100644
--- a/vendor/github.com/google/go-github/github/activity.go
+++ b/vendor/github.com/google/go-github/github/activity.go
@@ -5,6 +5,8 @@
package github
+import "context"
+
// ActivityService handles communication with the activity related
// methods of the GitHub API.
//
@@ -51,14 +53,14 @@ type Feeds struct {
//
// Note: Private feeds are only returned when authenticating via Basic Auth
// since current feed URIs use the older, non revocable auth tokens.
-func (s *ActivityService) ListFeeds() (*Feeds, *Response, error) {
+func (s *ActivityService) ListFeeds(ctx context.Context) (*Feeds, *Response, error) {
req, err := s.client.NewRequest("GET", "feeds", nil)
if err != nil {
return nil, nil, err
}
f := &Feeds{}
- resp, err := s.client.Do(req, f)
+ resp, err := s.client.Do(ctx, req, f)
if err != nil {
return nil, resp, err
}
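On the caller side, the effect of this signature change is that every request is now bounded by a context.Context, so cancellation and deadlines propagate to the underlying HTTP call. A minimal sketch, assuming an unauthenticated client (private feeds are then omitted, per the note above):

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil) // unauthenticated client

	// The context bounds the whole request; cancelling it aborts the HTTP call.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	feeds, _, err := client.Activity.ListFeeds(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", feeds)
}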
diff --git a/vendor/github.com/google/go-github/github/activity_events.go b/vendor/github.com/google/go-github/github/activity_events.go
index f749f6d..78219f8 100644
--- a/vendor/github.com/google/go-github/github/activity_events.go
+++ b/vendor/github.com/google/go-github/github/activity_events.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"encoding/json"
"fmt"
"time"
@@ -27,9 +28,9 @@ func (e Event) String() string {
return Stringify(e)
}
-// Payload returns the parsed event payload. For recognized event types,
+// ParsePayload parses the event payload. For recognized event types,
// a value of the corresponding struct type will be returned.
-func (e *Event) Payload() (payload interface{}) {
+func (e *Event) ParsePayload() (payload interface{}, err error) {
switch *e.Type {
case "CommitCommentEvent":
payload = &CommitCommentEvent{}
@@ -67,6 +68,12 @@ func (e *Event) Payload() (payload interface{}) {
payload = &PageBuildEvent{}
case "PingEvent":
payload = &PingEvent{}
+ case "ProjectEvent":
+ payload = &ProjectEvent{}
+ case "ProjectCardEvent":
+ payload = &ProjectCardEvent{}
+ case "ProjectColumnEvent":
+ payload = &ProjectColumnEvent{}
case "PublicEvent":
payload = &PublicEvent{}
case "PullRequestEvent":
@@ -88,8 +95,20 @@ func (e *Event) Payload() (payload interface{}) {
case "WatchEvent":
payload = &WatchEvent{}
}
- if err := json.Unmarshal(*e.RawPayload, &payload); err != nil {
- panic(err.Error())
+ err = json.Unmarshal(*e.RawPayload, &payload)
+ return payload, err
+}
+
+// Payload returns the parsed event payload. For recognized event types,
+// a value of the corresponding struct type will be returned.
+//
+// Deprecated: Use ParsePayload instead, which returns an error
+// rather than panicking if JSON unmarshaling of the raw payload fails.
+func (e *Event) Payload() (payload interface{}) {
+ var err error
+ payload, err = e.ParsePayload()
+ if err != nil {
+ panic(err)
}
return payload
}
@@ -97,7 +116,7 @@ func (e *Event) Payload() (payload interface{}) {
// ListEvents drinks from the firehose of all public events across GitHub.
//
// GitHub API docs: https://developer.github.com/v3/activity/events/#list-public-events
-func (s *ActivityService) ListEvents(opt *ListOptions) ([]*Event, *Response, error) {
+func (s *ActivityService) ListEvents(ctx context.Context, opt *ListOptions) ([]*Event, *Response, error) {
u, err := addOptions("events", opt)
if err != nil {
return nil, nil, err
@@ -109,7 +128,7 @@ func (s *ActivityService) ListEvents(opt *ListOptions) ([]*Event, *Response, err
}
var events []*Event
- resp, err := s.client.Do(req, &events)
+ resp, err := s.client.Do(ctx, req, &events)
if err != nil {
return nil, resp, err
}
@@ -120,7 +139,7 @@ func (s *ActivityService) ListEvents(opt *ListOptions) ([]*Event, *Response, err
// ListRepositoryEvents lists events for a repository.
//
// GitHub API docs: https://developer.github.com/v3/activity/events/#list-repository-events
-func (s *ActivityService) ListRepositoryEvents(owner, repo string, opt *ListOptions) ([]*Event, *Response, error) {
+func (s *ActivityService) ListRepositoryEvents(ctx context.Context, owner, repo string, opt *ListOptions) ([]*Event, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/events", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -133,7 +152,7 @@ func (s *ActivityService) ListRepositoryEvents(owner, repo string, opt *ListOpti
}
var events []*Event
- resp, err := s.client.Do(req, &events)
+ resp, err := s.client.Do(ctx, req, &events)
if err != nil {
return nil, resp, err
}
@@ -144,7 +163,7 @@ func (s *ActivityService) ListRepositoryEvents(owner, repo string, opt *ListOpti
// ListIssueEventsForRepository lists issue events for a repository.
//
// GitHub API docs: https://developer.github.com/v3/activity/events/#list-issue-events-for-a-repository
-func (s *ActivityService) ListIssueEventsForRepository(owner, repo string, opt *ListOptions) ([]*IssueEvent, *Response, error) {
+func (s *ActivityService) ListIssueEventsForRepository(ctx context.Context, owner, repo string, opt *ListOptions) ([]*IssueEvent, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -157,7 +176,7 @@ func (s *ActivityService) ListIssueEventsForRepository(owner, repo string, opt *
}
var events []*IssueEvent
- resp, err := s.client.Do(req, &events)
+ resp, err := s.client.Do(ctx, req, &events)
if err != nil {
return nil, resp, err
}
@@ -168,7 +187,7 @@ func (s *ActivityService) ListIssueEventsForRepository(owner, repo string, opt *
// ListEventsForRepoNetwork lists public events for a network of repositories.
//
// GitHub API docs: https://developer.github.com/v3/activity/events/#list-public-events-for-a-network-of-repositories
-func (s *ActivityService) ListEventsForRepoNetwork(owner, repo string, opt *ListOptions) ([]*Event, *Response, error) {
+func (s *ActivityService) ListEventsForRepoNetwork(ctx context.Context, owner, repo string, opt *ListOptions) ([]*Event, *Response, error) {
u := fmt.Sprintf("networks/%v/%v/events", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -181,7 +200,7 @@ func (s *ActivityService) ListEventsForRepoNetwork(owner, repo string, opt *List
}
var events []*Event
- resp, err := s.client.Do(req, &events)
+ resp, err := s.client.Do(ctx, req, &events)
if err != nil {
return nil, resp, err
}
@@ -192,7 +211,7 @@ func (s *ActivityService) ListEventsForRepoNetwork(owner, repo string, opt *List
// ListEventsForOrganization lists public events for an organization.
//
// GitHub API docs: https://developer.github.com/v3/activity/events/#list-public-events-for-an-organization
-func (s *ActivityService) ListEventsForOrganization(org string, opt *ListOptions) ([]*Event, *Response, error) {
+func (s *ActivityService) ListEventsForOrganization(ctx context.Context, org string, opt *ListOptions) ([]*Event, *Response, error) {
u := fmt.Sprintf("orgs/%v/events", org)
u, err := addOptions(u, opt)
if err != nil {
@@ -205,7 +224,7 @@ func (s *ActivityService) ListEventsForOrganization(org string, opt *ListOptions
}
var events []*Event
- resp, err := s.client.Do(req, &events)
+ resp, err := s.client.Do(ctx, req, &events)
if err != nil {
return nil, resp, err
}
@@ -217,7 +236,7 @@ func (s *ActivityService) ListEventsForOrganization(org string, opt *ListOptions
// true, only public events will be returned.
//
// GitHub API docs: https://developer.github.com/v3/activity/events/#list-events-performed-by-a-user
-func (s *ActivityService) ListEventsPerformedByUser(user string, publicOnly bool, opt *ListOptions) ([]*Event, *Response, error) {
+func (s *ActivityService) ListEventsPerformedByUser(ctx context.Context, user string, publicOnly bool, opt *ListOptions) ([]*Event, *Response, error) {
var u string
if publicOnly {
u = fmt.Sprintf("users/%v/events/public", user)
@@ -235,7 +254,7 @@ func (s *ActivityService) ListEventsPerformedByUser(user string, publicOnly bool
}
var events []*Event
- resp, err := s.client.Do(req, &events)
+ resp, err := s.client.Do(ctx, req, &events)
if err != nil {
return nil, resp, err
}
@@ -247,7 +266,7 @@ func (s *ActivityService) ListEventsPerformedByUser(user string, publicOnly bool
// true, only public events will be returned.
//
// GitHub API docs: https://developer.github.com/v3/activity/events/#list-events-that-a-user-has-received
-func (s *ActivityService) ListEventsReceivedByUser(user string, publicOnly bool, opt *ListOptions) ([]*Event, *Response, error) {
+func (s *ActivityService) ListEventsReceivedByUser(ctx context.Context, user string, publicOnly bool, opt *ListOptions) ([]*Event, *Response, error) {
var u string
if publicOnly {
u = fmt.Sprintf("users/%v/received_events/public", user)
@@ -265,7 +284,7 @@ func (s *ActivityService) ListEventsReceivedByUser(user string, publicOnly bool,
}
var events []*Event
- resp, err := s.client.Do(req, &events)
+ resp, err := s.client.Do(ctx, req, &events)
if err != nil {
return nil, resp, err
}
@@ -277,7 +296,7 @@ func (s *ActivityService) ListEventsReceivedByUser(user string, publicOnly bool,
// must be authenticated as the user to view this.
//
// GitHub API docs: https://developer.github.com/v3/activity/events/#list-events-for-an-organization
-func (s *ActivityService) ListUserEventsForOrganization(org, user string, opt *ListOptions) ([]*Event, *Response, error) {
+func (s *ActivityService) ListUserEventsForOrganization(ctx context.Context, org, user string, opt *ListOptions) ([]*Event, *Response, error) {
u := fmt.Sprintf("users/%v/events/orgs/%v", user, org)
u, err := addOptions(u, opt)
if err != nil {
@@ -290,7 +309,7 @@ func (s *ActivityService) ListUserEventsForOrganization(org, user string, opt *L
}
var events []*Event
- resp, err := s.client.Do(req, &events)
+ resp, err := s.client.Do(ctx, req, &events)
if err != nil {
return nil, resp, err
}
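A short sketch of consuming the new ParsePayload in place of the deprecated Payload: the caller handles the unmarshal error instead of recovering from a panic, then switches on the concrete payload type. The owner and repository names are placeholders for illustration only.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil)

	// Placeholder repository; any public repository works for this sketch.
	events, _, err := client.Activity.ListRepositoryEvents(ctx, "octocat", "Hello-World", nil)
	if err != nil {
		log.Fatal(err)
	}

	for _, ev := range events {
		payload, err := ev.ParsePayload() // returns an error instead of panicking
		if err != nil {
			log.Printf("skipping event: %v", err)
			continue
		}
		switch payload.(type) {
		case *github.PushEvent:
			fmt.Println("push event")
		case *github.WatchEvent:
			fmt.Println("watch (star) event")
		default:
			fmt.Printf("other event payload: %T\n", payload)
		}
	}
}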
diff --git a/vendor/github.com/google/go-github/github/activity_notifications.go b/vendor/github.com/google/go-github/github/activity_notifications.go
index 5ae80ad..45c8b2a 100644
--- a/vendor/github.com/google/go-github/github/activity_notifications.go
+++ b/vendor/github.com/google/go-github/github/activity_notifications.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -18,7 +19,7 @@ type Notification struct {
// Reason identifies the event that triggered the notification.
//
- // GitHub API Docs: https://developer.github.com/v3/activity/notifications/#notification-reasons
+ // GitHub API docs: https://developer.github.com/v3/activity/notifications/#notification-reasons
Reason *string `json:"reason,omitempty"`
Unread *bool `json:"unread,omitempty"`
@@ -48,8 +49,8 @@ type NotificationListOptions struct {
// ListNotifications lists all notifications for the authenticated user.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#list-your-notifications
-func (s *ActivityService) ListNotifications(opt *NotificationListOptions) ([]*Notification, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/notifications/#list-your-notifications
+func (s *ActivityService) ListNotifications(ctx context.Context, opt *NotificationListOptions) ([]*Notification, *Response, error) {
u := fmt.Sprintf("notifications")
u, err := addOptions(u, opt)
if err != nil {
@@ -62,7 +63,7 @@ func (s *ActivityService) ListNotifications(opt *NotificationListOptions) ([]*No
}
var notifications []*Notification
- resp, err := s.client.Do(req, &notifications)
+ resp, err := s.client.Do(ctx, req, &notifications)
if err != nil {
return nil, resp, err
}
@@ -73,8 +74,8 @@ func (s *ActivityService) ListNotifications(opt *NotificationListOptions) ([]*No
// ListRepositoryNotifications lists all notifications in a given repository
// for the authenticated user.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#list-your-notifications-in-a-repository
-func (s *ActivityService) ListRepositoryNotifications(owner, repo string, opt *NotificationListOptions) ([]*Notification, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/notifications/#list-your-notifications-in-a-repository
+func (s *ActivityService) ListRepositoryNotifications(ctx context.Context, owner, repo string, opt *NotificationListOptions) ([]*Notification, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/notifications", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -87,7 +88,7 @@ func (s *ActivityService) ListRepositoryNotifications(owner, repo string, opt *N
}
var notifications []*Notification
- resp, err := s.client.Do(req, &notifications)
+ resp, err := s.client.Do(ctx, req, &notifications)
if err != nil {
return nil, resp, err
}
@@ -101,8 +102,8 @@ type markReadOptions struct {
// MarkNotificationsRead marks all notifications up to lastRead as read.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-as-read
-func (s *ActivityService) MarkNotificationsRead(lastRead time.Time) (*Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/notifications/#mark-as-read
+func (s *ActivityService) MarkNotificationsRead(ctx context.Context, lastRead time.Time) (*Response, error) {
opts := &markReadOptions{
LastReadAt: lastRead,
}
@@ -111,14 +112,14 @@ func (s *ActivityService) MarkNotificationsRead(lastRead time.Time) (*Response,
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// MarkRepositoryNotificationsRead marks all notifications up to lastRead in
// the specified repository as read.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-notifications-as-read-in-a-repository
-func (s *ActivityService) MarkRepositoryNotificationsRead(owner, repo string, lastRead time.Time) (*Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/notifications/#mark-notifications-as-read-in-a-repository
+func (s *ActivityService) MarkRepositoryNotificationsRead(ctx context.Context, owner, repo string, lastRead time.Time) (*Response, error) {
opts := &markReadOptions{
LastReadAt: lastRead,
}
@@ -128,13 +129,13 @@ func (s *ActivityService) MarkRepositoryNotificationsRead(owner, repo string, la
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// GetThread gets the specified notification thread.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#view-a-single-thread
-func (s *ActivityService) GetThread(id string) (*Notification, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/notifications/#view-a-single-thread
+func (s *ActivityService) GetThread(ctx context.Context, id string) (*Notification, *Response, error) {
u := fmt.Sprintf("notifications/threads/%v", id)
req, err := s.client.NewRequest("GET", u, nil)
@@ -143,7 +144,7 @@ func (s *ActivityService) GetThread(id string) (*Notification, *Response, error)
}
notification := new(Notification)
- resp, err := s.client.Do(req, notification)
+ resp, err := s.client.Do(ctx, req, notification)
if err != nil {
return nil, resp, err
}
@@ -153,8 +154,8 @@ func (s *ActivityService) GetThread(id string) (*Notification, *Response, error)
// MarkThreadRead marks the specified thread as read.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#mark-a-thread-as-read
-func (s *ActivityService) MarkThreadRead(id string) (*Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/notifications/#mark-a-thread-as-read
+func (s *ActivityService) MarkThreadRead(ctx context.Context, id string) (*Response, error) {
u := fmt.Sprintf("notifications/threads/%v", id)
req, err := s.client.NewRequest("PATCH", u, nil)
@@ -162,14 +163,14 @@ func (s *ActivityService) MarkThreadRead(id string) (*Response, error) {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// GetThreadSubscription checks to see if the authenticated user is subscribed
// to a thread.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#get-a-thread-subscription
-func (s *ActivityService) GetThreadSubscription(id string) (*Subscription, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/notifications/#get-a-thread-subscription
+func (s *ActivityService) GetThreadSubscription(ctx context.Context, id string) (*Subscription, *Response, error) {
u := fmt.Sprintf("notifications/threads/%v/subscription", id)
req, err := s.client.NewRequest("GET", u, nil)
@@ -178,7 +179,7 @@ func (s *ActivityService) GetThreadSubscription(id string) (*Subscription, *Resp
}
sub := new(Subscription)
- resp, err := s.client.Do(req, sub)
+ resp, err := s.client.Do(ctx, req, sub)
if err != nil {
return nil, resp, err
}
@@ -189,8 +190,8 @@ func (s *ActivityService) GetThreadSubscription(id string) (*Subscription, *Resp
// SetThreadSubscription sets the subscription for the specified thread for the
// authenticated user.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#set-a-thread-subscription
-func (s *ActivityService) SetThreadSubscription(id string, subscription *Subscription) (*Subscription, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/notifications/#set-a-thread-subscription
+func (s *ActivityService) SetThreadSubscription(ctx context.Context, id string, subscription *Subscription) (*Subscription, *Response, error) {
u := fmt.Sprintf("notifications/threads/%v/subscription", id)
req, err := s.client.NewRequest("PUT", u, subscription)
@@ -199,7 +200,7 @@ func (s *ActivityService) SetThreadSubscription(id string, subscription *Subscri
}
sub := new(Subscription)
- resp, err := s.client.Do(req, sub)
+ resp, err := s.client.Do(ctx, req, sub)
if err != nil {
return nil, resp, err
}
@@ -210,13 +211,13 @@ func (s *ActivityService) SetThreadSubscription(id string, subscription *Subscri
// DeleteThreadSubscription deletes the subscription for the specified thread
// for the authenticated user.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/notifications/#delete-a-thread-subscription
-func (s *ActivityService) DeleteThreadSubscription(id string) (*Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/notifications/#delete-a-thread-subscription
+func (s *ActivityService) DeleteThreadSubscription(ctx context.Context, id string) (*Response, error) {
u := fmt.Sprintf("notifications/threads/%v/subscription", id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/activity_star.go b/vendor/github.com/google/go-github/github/activity_star.go
index db9a309..d5b0671 100644
--- a/vendor/github.com/google/go-github/github/activity_star.go
+++ b/vendor/github.com/google/go-github/github/activity_star.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// StarredRepository is returned by ListStarred.
type StarredRepository struct {
@@ -21,8 +24,8 @@ type Stargazer struct {
// ListStargazers lists people who have starred the specified repo.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/starring/#list-stargazers
-func (s *ActivityService) ListStargazers(owner, repo string, opt *ListOptions) ([]*Stargazer, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/starring/#list-stargazers
+func (s *ActivityService) ListStargazers(ctx context.Context, owner, repo string, opt *ListOptions) ([]*Stargazer, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/stargazers", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -38,7 +41,7 @@ func (s *ActivityService) ListStargazers(owner, repo string, opt *ListOptions) (
req.Header.Set("Accept", mediaTypeStarringPreview)
var stargazers []*Stargazer
- resp, err := s.client.Do(req, &stargazers)
+ resp, err := s.client.Do(ctx, req, &stargazers)
if err != nil {
return nil, resp, err
}
@@ -64,7 +67,7 @@ type ActivityListStarredOptions struct {
// will list the starred repositories for the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/activity/starring/#list-repositories-being-starred
-func (s *ActivityService) ListStarred(user string, opt *ActivityListStarredOptions) ([]*StarredRepository, *Response, error) {
+func (s *ActivityService) ListStarred(ctx context.Context, user string, opt *ActivityListStarredOptions) ([]*StarredRepository, *Response, error) {
var u string
if user != "" {
u = fmt.Sprintf("users/%v/starred", user)
@@ -85,7 +88,7 @@ func (s *ActivityService) ListStarred(user string, opt *ActivityListStarredOptio
req.Header.Set("Accept", mediaTypeStarringPreview)
var repos []*StarredRepository
- resp, err := s.client.Do(req, &repos)
+ resp, err := s.client.Do(ctx, req, &repos)
if err != nil {
return nil, resp, err
}
@@ -96,13 +99,13 @@ func (s *ActivityService) ListStarred(user string, opt *ActivityListStarredOptio
// IsStarred checks if a repository is starred by the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/activity/starring/#check-if-you-are-starring-a-repository
-func (s *ActivityService) IsStarred(owner, repo string) (bool, *Response, error) {
+func (s *ActivityService) IsStarred(ctx context.Context, owner, repo string) (bool, *Response, error) {
u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return false, nil, err
}
- resp, err := s.client.Do(req, nil)
+ resp, err := s.client.Do(ctx, req, nil)
starred, err := parseBoolResponse(err)
return starred, resp, err
}
@@ -110,23 +113,23 @@ func (s *ActivityService) IsStarred(owner, repo string) (bool, *Response, error)
// Star a repository as the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/activity/starring/#star-a-repository
-func (s *ActivityService) Star(owner, repo string) (*Response, error) {
+func (s *ActivityService) Star(ctx context.Context, owner, repo string) (*Response, error) {
u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
req, err := s.client.NewRequest("PUT", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// Unstar a repository as the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/activity/starring/#unstar-a-repository
-func (s *ActivityService) Unstar(owner, repo string) (*Response, error) {
+func (s *ActivityService) Unstar(ctx context.Context, owner, repo string) (*Response, error) {
u := fmt.Sprintf("user/starred/%v/%v", owner, repo)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/activity_watching.go b/vendor/github.com/google/go-github/github/activity_watching.go
index d8ee72d..c749ca8 100644
--- a/vendor/github.com/google/go-github/github/activity_watching.go
+++ b/vendor/github.com/google/go-github/github/activity_watching.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// Subscription identifies a repository or thread subscription.
type Subscription struct {
@@ -24,8 +27,8 @@ type Subscription struct {
// ListWatchers lists watchers of a particular repo.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/watching/#list-watchers
-func (s *ActivityService) ListWatchers(owner, repo string, opt *ListOptions) ([]*User, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/watching/#list-watchers
+func (s *ActivityService) ListWatchers(ctx context.Context, owner, repo string, opt *ListOptions) ([]*User, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/subscribers", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -38,7 +41,7 @@ func (s *ActivityService) ListWatchers(owner, repo string, opt *ListOptions) ([]
}
var watchers []*User
- resp, err := s.client.Do(req, &watchers)
+ resp, err := s.client.Do(ctx, req, &watchers)
if err != nil {
return nil, resp, err
}
@@ -49,8 +52,8 @@ func (s *ActivityService) ListWatchers(owner, repo string, opt *ListOptions) ([]
// ListWatched lists the repositories the specified user is watching. Passing
// the empty string will fetch watched repos for the authenticated user.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/watching/#list-repositories-being-watched
-func (s *ActivityService) ListWatched(user string, opt *ListOptions) ([]*Repository, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/watching/#list-repositories-being-watched
+func (s *ActivityService) ListWatched(ctx context.Context, user string, opt *ListOptions) ([]*Repository, *Response, error) {
var u string
if user != "" {
u = fmt.Sprintf("users/%v/subscriptions", user)
@@ -68,7 +71,7 @@ func (s *ActivityService) ListWatched(user string, opt *ListOptions) ([]*Reposit
}
var watched []*Repository
- resp, err := s.client.Do(req, &watched)
+ resp, err := s.client.Do(ctx, req, &watched)
if err != nil {
return nil, resp, err
}
@@ -80,8 +83,8 @@ func (s *ActivityService) ListWatched(user string, opt *ListOptions) ([]*Reposit
// repository for the authenticated user. If the authenticated user is not
// watching the repository, a nil Subscription is returned.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/watching/#get-a-repository-subscription
-func (s *ActivityService) GetRepositorySubscription(owner, repo string) (*Subscription, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/watching/#get-a-repository-subscription
+func (s *ActivityService) GetRepositorySubscription(ctx context.Context, owner, repo string) (*Subscription, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
@@ -90,7 +93,7 @@ func (s *ActivityService) GetRepositorySubscription(owner, repo string) (*Subscr
}
sub := new(Subscription)
- resp, err := s.client.Do(req, sub)
+ resp, err := s.client.Do(ctx, req, sub)
if err != nil {
// if it's just a 404, don't return that as an error
_, err = parseBoolResponse(err)
@@ -107,8 +110,8 @@ func (s *ActivityService) GetRepositorySubscription(owner, repo string) (*Subscr
// To ignore notifications made within a repository, set subscription.Ignored to true.
// To stop watching a repository, use DeleteRepositorySubscription.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/watching/#set-a-repository-subscription
-func (s *ActivityService) SetRepositorySubscription(owner, repo string, subscription *Subscription) (*Subscription, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/watching/#set-a-repository-subscription
+func (s *ActivityService) SetRepositorySubscription(ctx context.Context, owner, repo string, subscription *Subscription) (*Subscription, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo)
req, err := s.client.NewRequest("PUT", u, subscription)
@@ -117,7 +120,7 @@ func (s *ActivityService) SetRepositorySubscription(owner, repo string, subscrip
}
sub := new(Subscription)
- resp, err := s.client.Do(req, sub)
+ resp, err := s.client.Do(ctx, req, sub)
if err != nil {
return nil, resp, err
}
@@ -131,13 +134,13 @@ func (s *ActivityService) SetRepositorySubscription(owner, repo string, subscrip
// This is used to stop watching a repository. To control whether or not to
// receive notifications from a repository, use SetRepositorySubscription.
//
-// GitHub API Docs: https://developer.github.com/v3/activity/watching/#delete-a-repository-subscription
-func (s *ActivityService) DeleteRepositorySubscription(owner, repo string) (*Response, error) {
+// GitHub API docs: https://developer.github.com/v3/activity/watching/#delete-a-repository-subscription
+func (s *ActivityService) DeleteRepositorySubscription(ctx context.Context, owner, repo string) (*Response, error) {
u := fmt.Sprintf("repos/%s/%s/subscription", owner, repo)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/admin.go b/vendor/github.com/google/go-github/github/admin.go
index f77b2a2..d0f055b 100644
--- a/vendor/github.com/google/go-github/github/admin.go
+++ b/vendor/github.com/google/go-github/github/admin.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// AdminService handles communication with the admin related methods of the
// GitHub API. These API routes are normally only accessible for GitHub
@@ -62,7 +65,7 @@ func (m UserLDAPMapping) String() string {
// UpdateUserLDAPMapping updates the mapping between a GitHub user and an LDAP user.
//
// GitHub API docs: https://developer.github.com/v3/enterprise/ldap/#update-ldap-mapping-for-a-user
-func (s *AdminService) UpdateUserLDAPMapping(user string, mapping *UserLDAPMapping) (*UserLDAPMapping, *Response, error) {
+func (s *AdminService) UpdateUserLDAPMapping(ctx context.Context, user string, mapping *UserLDAPMapping) (*UserLDAPMapping, *Response, error) {
u := fmt.Sprintf("admin/ldap/users/%v/mapping", user)
req, err := s.client.NewRequest("PATCH", u, mapping)
if err != nil {
@@ -70,7 +73,7 @@ func (s *AdminService) UpdateUserLDAPMapping(user string, mapping *UserLDAPMappi
}
m := new(UserLDAPMapping)
- resp, err := s.client.Do(req, m)
+ resp, err := s.client.Do(ctx, req, m)
if err != nil {
return nil, resp, err
}
@@ -81,7 +84,7 @@ func (s *AdminService) UpdateUserLDAPMapping(user string, mapping *UserLDAPMappi
// UpdateTeamLDAPMapping updates the mapping between a GitHub team and an LDAP group.
//
// GitHub API docs: https://developer.github.com/v3/enterprise/ldap/#update-ldap-mapping-for-a-team
-func (s *AdminService) UpdateTeamLDAPMapping(team int, mapping *TeamLDAPMapping) (*TeamLDAPMapping, *Response, error) {
+func (s *AdminService) UpdateTeamLDAPMapping(ctx context.Context, team int, mapping *TeamLDAPMapping) (*TeamLDAPMapping, *Response, error) {
u := fmt.Sprintf("admin/ldap/teams/%v/mapping", team)
req, err := s.client.NewRequest("PATCH", u, mapping)
if err != nil {
@@ -89,7 +92,7 @@ func (s *AdminService) UpdateTeamLDAPMapping(team int, mapping *TeamLDAPMapping)
}
m := new(TeamLDAPMapping)
- resp, err := s.client.Do(req, m)
+ resp, err := s.client.Do(ctx, req, m)
if err != nil {
return nil, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/authorizations.go b/vendor/github.com/google/go-github/github/authorizations.go
index b50de5e..181e83d 100644
--- a/vendor/github.com/google/go-github/github/authorizations.go
+++ b/vendor/github.com/google/go-github/github/authorizations.go
@@ -5,11 +5,14 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// Scope models a GitHub authorization scope.
//
-// GitHub API docs:https://developer.github.com/v3/oauth/#scopes
+// GitHub API docs: https://developer.github.com/v3/oauth/#scopes
type Scope string
// This is the set of scopes for GitHub API V3
@@ -134,7 +137,7 @@ func (a AuthorizationUpdateRequest) String() string {
// List the authorizations for the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#list-your-authorizations
-func (s *AuthorizationsService) List(opt *ListOptions) ([]*Authorization, *Response, error) {
+func (s *AuthorizationsService) List(ctx context.Context, opt *ListOptions) ([]*Authorization, *Response, error) {
u := "authorizations"
u, err := addOptions(u, opt)
if err != nil {
@@ -147,7 +150,7 @@ func (s *AuthorizationsService) List(opt *ListOptions) ([]*Authorization, *Respo
}
var auths []*Authorization
- resp, err := s.client.Do(req, &auths)
+ resp, err := s.client.Do(ctx, req, &auths)
if err != nil {
return nil, resp, err
}
@@ -157,7 +160,7 @@ func (s *AuthorizationsService) List(opt *ListOptions) ([]*Authorization, *Respo
// Get a single authorization.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#get-a-single-authorization
-func (s *AuthorizationsService) Get(id int) (*Authorization, *Response, error) {
+func (s *AuthorizationsService) Get(ctx context.Context, id int) (*Authorization, *Response, error) {
u := fmt.Sprintf("authorizations/%d", id)
req, err := s.client.NewRequest("GET", u, nil)
@@ -166,7 +169,7 @@ func (s *AuthorizationsService) Get(id int) (*Authorization, *Response, error) {
}
a := new(Authorization)
- resp, err := s.client.Do(req, a)
+ resp, err := s.client.Do(ctx, req, a)
if err != nil {
return nil, resp, err
}
@@ -176,7 +179,7 @@ func (s *AuthorizationsService) Get(id int) (*Authorization, *Response, error) {
// Create a new authorization for the specified OAuth application.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#create-a-new-authorization
-func (s *AuthorizationsService) Create(auth *AuthorizationRequest) (*Authorization, *Response, error) {
+func (s *AuthorizationsService) Create(ctx context.Context, auth *AuthorizationRequest) (*Authorization, *Response, error) {
u := "authorizations"
req, err := s.client.NewRequest("POST", u, auth)
@@ -185,7 +188,7 @@ func (s *AuthorizationsService) Create(auth *AuthorizationRequest) (*Authorizati
}
a := new(Authorization)
- resp, err := s.client.Do(req, a)
+ resp, err := s.client.Do(ctx, req, a)
if err != nil {
return nil, resp, err
}
@@ -204,9 +207,9 @@ func (s *AuthorizationsService) Create(auth *AuthorizationRequest) (*Authorizati
// clientID is the OAuth Client ID with which to create the token.
//
// GitHub API docs:
-// - https://developer.github.com/v3/oauth_authorizations/#get-or-create-an-authorization-for-a-specific-app
-// - https://developer.github.com/v3/oauth_authorizations/#get-or-create-an-authorization-for-a-specific-app-and-fingerprint
-func (s *AuthorizationsService) GetOrCreateForApp(clientID string, auth *AuthorizationRequest) (*Authorization, *Response, error) {
+// https://developer.github.com/v3/oauth_authorizations/#get-or-create-an-authorization-for-a-specific-app
+// https://developer.github.com/v3/oauth_authorizations/#get-or-create-an-authorization-for-a-specific-app-and-fingerprint
+func (s *AuthorizationsService) GetOrCreateForApp(ctx context.Context, clientID string, auth *AuthorizationRequest) (*Authorization, *Response, error) {
var u string
if auth.Fingerprint == nil || *auth.Fingerprint == "" {
u = fmt.Sprintf("authorizations/clients/%v", clientID)
@@ -220,7 +223,7 @@ func (s *AuthorizationsService) GetOrCreateForApp(clientID string, auth *Authori
}
a := new(Authorization)
- resp, err := s.client.Do(req, a)
+ resp, err := s.client.Do(ctx, req, a)
if err != nil {
return nil, resp, err
}
@@ -231,7 +234,7 @@ func (s *AuthorizationsService) GetOrCreateForApp(clientID string, auth *Authori
// Edit a single authorization.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#update-an-existing-authorization
-func (s *AuthorizationsService) Edit(id int, auth *AuthorizationUpdateRequest) (*Authorization, *Response, error) {
+func (s *AuthorizationsService) Edit(ctx context.Context, id int, auth *AuthorizationUpdateRequest) (*Authorization, *Response, error) {
u := fmt.Sprintf("authorizations/%d", id)
req, err := s.client.NewRequest("PATCH", u, auth)
@@ -240,7 +243,7 @@ func (s *AuthorizationsService) Edit(id int, auth *AuthorizationUpdateRequest) (
}
a := new(Authorization)
- resp, err := s.client.Do(req, a)
+ resp, err := s.client.Do(ctx, req, a)
if err != nil {
return nil, resp, err
}
@@ -251,7 +254,7 @@ func (s *AuthorizationsService) Edit(id int, auth *AuthorizationUpdateRequest) (
// Delete a single authorization.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#delete-an-authorization
-func (s *AuthorizationsService) Delete(id int) (*Response, error) {
+func (s *AuthorizationsService) Delete(ctx context.Context, id int) (*Response, error) {
u := fmt.Sprintf("authorizations/%d", id)
req, err := s.client.NewRequest("DELETE", u, nil)
@@ -259,7 +262,7 @@ func (s *AuthorizationsService) Delete(id int) (*Response, error) {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// Check if an OAuth token is valid for a specific app.
@@ -271,7 +274,7 @@ func (s *AuthorizationsService) Delete(id int) (*Response, error) {
// The returned Authorization.User field will be populated.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#check-an-authorization
-func (s *AuthorizationsService) Check(clientID string, token string) (*Authorization, *Response, error) {
+func (s *AuthorizationsService) Check(ctx context.Context, clientID string, token string) (*Authorization, *Response, error) {
u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token)
req, err := s.client.NewRequest("GET", u, nil)
@@ -280,7 +283,7 @@ func (s *AuthorizationsService) Check(clientID string, token string) (*Authoriza
}
a := new(Authorization)
- resp, err := s.client.Do(req, a)
+ resp, err := s.client.Do(ctx, req, a)
if err != nil {
return nil, resp, err
}
@@ -299,7 +302,7 @@ func (s *AuthorizationsService) Check(clientID string, token string) (*Authoriza
// The returned Authorization.User field will be populated.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#reset-an-authorization
-func (s *AuthorizationsService) Reset(clientID string, token string) (*Authorization, *Response, error) {
+func (s *AuthorizationsService) Reset(ctx context.Context, clientID string, token string) (*Authorization, *Response, error) {
u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token)
req, err := s.client.NewRequest("POST", u, nil)
@@ -308,7 +311,7 @@ func (s *AuthorizationsService) Reset(clientID string, token string) (*Authoriza
}
a := new(Authorization)
- resp, err := s.client.Do(req, a)
+ resp, err := s.client.Do(ctx, req, a)
if err != nil {
return nil, resp, err
}
@@ -323,7 +326,7 @@ func (s *AuthorizationsService) Reset(clientID string, token string) (*Authoriza
// clientSecret. Invalid tokens will return a 404 Not Found.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#revoke-an-authorization-for-an-application
-func (s *AuthorizationsService) Revoke(clientID string, token string) (*Response, error) {
+func (s *AuthorizationsService) Revoke(ctx context.Context, clientID string, token string) (*Response, error) {
u := fmt.Sprintf("applications/%v/tokens/%v", clientID, token)
req, err := s.client.NewRequest("DELETE", u, nil)
@@ -331,7 +334,7 @@ func (s *AuthorizationsService) Revoke(clientID string, token string) (*Response
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// ListGrants lists the set of OAuth applications that have been granted
@@ -340,14 +343,14 @@ func (s *AuthorizationsService) Revoke(clientID string, token string) (*Response
// tokens an application has generated for the user.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#list-your-grants
-func (s *AuthorizationsService) ListGrants() ([]*Grant, *Response, error) {
+func (s *AuthorizationsService) ListGrants(ctx context.Context) ([]*Grant, *Response, error) {
req, err := s.client.NewRequest("GET", "applications/grants", nil)
if err != nil {
return nil, nil, err
}
grants := []*Grant{}
- resp, err := s.client.Do(req, &grants)
+ resp, err := s.client.Do(ctx, req, &grants)
if err != nil {
return nil, resp, err
}
@@ -358,7 +361,7 @@ func (s *AuthorizationsService) ListGrants() ([]*Grant, *Response, error) {
// GetGrant gets a single OAuth application grant.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#get-a-single-grant
-func (s *AuthorizationsService) GetGrant(id int) (*Grant, *Response, error) {
+func (s *AuthorizationsService) GetGrant(ctx context.Context, id int) (*Grant, *Response, error) {
u := fmt.Sprintf("applications/grants/%d", id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -366,7 +369,7 @@ func (s *AuthorizationsService) GetGrant(id int) (*Grant, *Response, error) {
}
grant := new(Grant)
- resp, err := s.client.Do(req, grant)
+ resp, err := s.client.Do(ctx, req, grant)
if err != nil {
return nil, resp, err
}
@@ -379,14 +382,14 @@ func (s *AuthorizationsService) GetGrant(id int) (*Grant, *Response, error) {
// the user.
//
// GitHub API docs: https://developer.github.com/v3/oauth_authorizations/#delete-a-grant
-func (s *AuthorizationsService) DeleteGrant(id int) (*Response, error) {
+func (s *AuthorizationsService) DeleteGrant(ctx context.Context, id int) (*Response, error) {
u := fmt.Sprintf("applications/grants/%d", id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// CreateImpersonation creates an impersonation OAuth token.
@@ -396,7 +399,7 @@ func (s *AuthorizationsService) DeleteGrant(id int) (*Response, error) {
// new token automatically revokes an existing one.
//
// GitHub API docs: https://developer.github.com/enterprise/2.5/v3/users/administration/#create-an-impersonation-oauth-token
-func (s *AuthorizationsService) CreateImpersonation(username string, authReq *AuthorizationRequest) (*Authorization, *Response, error) {
+func (s *AuthorizationsService) CreateImpersonation(ctx context.Context, username string, authReq *AuthorizationRequest) (*Authorization, *Response, error) {
u := fmt.Sprintf("admin/users/%v/authorizations", username)
req, err := s.client.NewRequest("POST", u, authReq)
if err != nil {
@@ -404,7 +407,7 @@ func (s *AuthorizationsService) CreateImpersonation(username string, authReq *Au
}
a := new(Authorization)
- resp, err := s.client.Do(req, a)
+ resp, err := s.client.Do(ctx, req, a)
if err != nil {
return nil, resp, err
}
@@ -416,12 +419,12 @@ func (s *AuthorizationsService) CreateImpersonation(username string, authReq *Au
// NOTE: there can be only one at a time.
//
// GitHub API docs: https://developer.github.com/enterprise/2.5/v3/users/administration/#delete-an-impersonation-oauth-token
-func (s *AuthorizationsService) DeleteImpersonation(username string) (*Response, error) {
+func (s *AuthorizationsService) DeleteImpersonation(ctx context.Context, username string) (*Response, error) {
u := fmt.Sprintf("admin/users/%v/authorizations", username)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
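
GetOrCreateForApp above switches between the plain and per-fingerprint endpoints depending on whether auth.Fingerprint is set. A hedged sketch of a call that selects the fingerprint variant, assuming client and ctx as in the earlier example; the AuthorizationRequest fields other than Fingerprint (Scopes, Note, ClientSecret) come from the upstream struct definition and are not shown in this hunk, and clientID/clientSecret are placeholders:

    req := &github.AuthorizationRequest{
        Scopes:       []github.Scope{"repo"},
        Note:         github.String("deploy token"),
        ClientSecret: github.String(clientSecret),
        Fingerprint:  github.String("host-01"), // non-empty, so the per-fingerprint endpoint is used
    }
    auth, _, err := client.Authorizations.GetOrCreateForApp(ctx, clientID, req)
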
diff --git a/vendor/github.com/google/go-github/github/doc.go b/vendor/github.com/google/go-github/github/doc.go
index 28ef1df..0acf328 100644
--- a/vendor/github.com/google/go-github/github/doc.go
+++ b/vendor/github.com/google/go-github/github/doc.go
@@ -16,7 +16,7 @@ access different parts of the GitHub API. For example:
client := github.NewClient(nil)
// list all organizations for user "willnorris"
- orgs, _, err := client.Organizations.List("willnorris", nil)
+ orgs, _, err := client.Organizations.List(ctx, "willnorris", nil)
Some API methods have optional parameters that can be passed. For example:
@@ -24,7 +24,7 @@ Some API methods have optional parameters that can be passed. For example:
// list public repositories for org "github"
opt := &github.RepositoryListByOrgOptions{Type: "public"}
- repos, _, err := client.Repositories.ListByOrg("github", opt)
+ repos, _, err := client.Repositories.ListByOrg(ctx, "github", opt)
The services of a client divide the API into logical chunks and correspond to
the structure of the GitHub API documentation at
@@ -42,15 +42,16 @@ use it with the oauth2 library using:
import "golang.org/x/oauth2"
func main() {
+ ctx := context.Background()
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: "... your access token ..."},
)
- tc := oauth2.NewClient(oauth2.NoContext, ts)
+ tc := oauth2.NewClient(ctx, ts)
client := github.NewClient(tc)
// list all repositories for the authenticated user
- repos, _, err := client.Repositories.List("", nil)
+ repos, _, err := client.Repositories.List(ctx, "", nil)
}
Note that when using an authenticated Client, all calls made by the client will
@@ -78,7 +79,7 @@ up-to-date rate limit data for the client.
To detect an API rate limit error, you can check if its type is *github.RateLimitError:
- repos, _, err := client.Repositories.List("", nil)
+ repos, _, err := client.Repositories.List(ctx, "", nil)
if _, ok := err.(*github.RateLimitError); ok {
log.Println("hit rate limit")
}
@@ -96,7 +97,7 @@ this behavior.
To detect this condition of error, you can check if its type is
*github.AcceptedError:
- stats, _, err := client.Repositories.ListContributorsStats(org, repo)
+ stats, _, err := client.Repositories.ListContributorsStats(ctx, org, repo)
if _, ok := err.(*github.AcceptedError); ok {
log.Println("scheduled on GitHub side")
}
@@ -124,7 +125,7 @@ bool, and int values. For example:
Name: github.String("foo"),
Private: github.Bool(true),
}
- client.Repositories.Create("", repo)
+ client.Repositories.Create(ctx, "", repo)
Users who have worked with protocol buffers should find this pattern familiar.
@@ -145,7 +146,7 @@ github.Response struct.
// get all pages of results
var allRepos []*github.Repository
for {
- repos, resp, err := client.Repositories.ListByOrg("github", opt)
+ repos, resp, err := client.Repositories.ListByOrg(ctx, "github", opt)
if err != nil {
return err
}
@@ -156,5 +157,16 @@ github.Response struct.
opt.ListOptions.Page = resp.NextPage
}
+Google App Engine
+
+Go on App Engine Classic (which as of this writing uses Go 1.6) can not use
+the "context" import and still relies on "golang.org/x/net/context".
+As a result, if you wish to continue to use "go-github" on App Engine Classic,
+you will need to rewrite all the "context" imports using the following command:
+
+ gofmt -w -r '"context" -> "golang.org/x/net/context"' *.go
+
+See "with_appengine.go" for more details.
+
*/
package github
diff --git a/vendor/github.com/google/go-github/github/event_types.go b/vendor/github.com/google/go-github/github/event_types.go
index b98492e..acf139b 100644
--- a/vendor/github.com/google/go-github/github/event_types.go
+++ b/vendor/github.com/google/go-github/github/event_types.go
@@ -10,7 +10,7 @@ package github
// CommitCommentEvent is triggered when a commit comment is created.
// The Webhook event name is "commit_comment".
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#commitcommentevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#commitcommentevent
type CommitCommentEvent struct {
Comment *RepositoryComment `json:"comment,omitempty"`
@@ -28,7 +28,7 @@ type CommitCommentEvent struct {
// Additionally, webhooks will not receive this event for tags if more
// than three tags are pushed at once.
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#createevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#createevent
type CreateEvent struct {
Ref *string `json:"ref,omitempty"`
// RefType is the object that was created. Possible values are: "repository", "branch", "tag".
@@ -49,7 +49,7 @@ type CreateEvent struct {
// Note: webhooks will not receive this event for tags if more than three tags
// are deleted at once.
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#deleteevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#deleteevent
type DeleteEvent struct {
Ref *string `json:"ref,omitempty"`
// RefType is the object that was deleted. Possible values are: "branch", "tag".
@@ -67,7 +67,7 @@ type DeleteEvent struct {
//
// Events of this type are not visible in timelines, they are only used to trigger hooks.
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#deploymentevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#deploymentevent
type DeploymentEvent struct {
Deployment *Deployment `json:"deployment,omitempty"`
Repo *Repository `json:"repository,omitempty"`
@@ -82,7 +82,7 @@ type DeploymentEvent struct {
//
// Events of this type are not visible in timelines, they are only used to trigger hooks.
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#deploymentstatusevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#deploymentstatusevent
type DeploymentStatusEvent struct {
Deployment *Deployment `json:"deployment,omitempty"`
DeploymentStatus *DeploymentStatus `json:"deployment_status,omitempty"`
@@ -96,7 +96,7 @@ type DeploymentStatusEvent struct {
// ForkEvent is triggered when a user forks a repository.
// The Webhook event name is "fork".
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#forkevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#forkevent
type ForkEvent struct {
// Forkee is the created repository.
Forkee *Repository `json:"forkee,omitempty"`
@@ -120,7 +120,7 @@ type Page struct {
// GollumEvent is triggered when a Wiki page is created or updated.
// The Webhook event name is "gollum".
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#gollumevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#gollumevent
type GollumEvent struct {
Pages []*Page `json:"pages,omitempty"`
@@ -141,10 +141,34 @@ type EditChange struct {
} `json:"body,omitempty"`
}
+// ProjectChange represents the changes when a project has been edited.
+type ProjectChange struct {
+ Name *struct {
+ From *string `json:"from,omitempty"`
+ } `json:"name,omitempty"`
+ Body *struct {
+ From *string `json:"from,omitempty"`
+ } `json:"body,omitempty"`
+}
+
+// ProjectCardChange represents the changes when a project card has been edited.
+type ProjectCardChange struct {
+ Note *struct {
+ From *string `json:"from,omitempty"`
+ } `json:"note,omitempty"`
+}
+
+// ProjectColumnChange represents the changes when a project column has been edited.
+type ProjectColumnChange struct {
+ Name *struct {
+ From *string `json:"from,omitempty"`
+ } `json:"name,omitempty"`
+}
+
// IntegrationInstallationEvent is triggered when an integration is created or deleted.
// The Webhook event name is "integration_installation".
//
-// GitHub docs: https://developer.github.com/early-access/integrations/webhooks/#integrationinstallationevent
+// GitHub API docs: https://developer.github.com/early-access/integrations/webhooks/#integrationinstallationevent
type IntegrationInstallationEvent struct {
// The action that was performed. Possible values for an "integration_installation"
// event are: "created", "deleted".
@@ -156,7 +180,7 @@ type IntegrationInstallationEvent struct {
// IntegrationInstallationRepositoriesEvent is triggered when an integration repository
// is added or removed. The Webhook event name is "integration_installation_repositories".
//
-// GitHub docs: https://developer.github.com/early-access/integrations/webhooks/#integrationinstallationrepositoriesevent
+// GitHub API docs: https://developer.github.com/early-access/integrations/webhooks/#integrationinstallationrepositoriesevent
type IntegrationInstallationRepositoriesEvent struct {
// The action that was performed. Possible values for an "integration_installation_repositories"
// event are: "added", "removed".
@@ -171,7 +195,7 @@ type IntegrationInstallationRepositoriesEvent struct {
// or pull request.
// The Webhook event name is "issue_comment".
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#issuecommentevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#issuecommentevent
type IssueCommentEvent struct {
// Action is the action that was performed on the comment.
// Possible values are: "created", "edited", "deleted".
@@ -190,7 +214,7 @@ type IssueCommentEvent struct {
// unlabeled, opened, closed, or reopened.
// The Webhook event name is "issues".
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#issuesevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#issuesevent
type IssuesEvent struct {
// Action is the action that was performed. Possible values are: "assigned",
// "unassigned", "labeled", "unlabeled", "opened", "closed", "reopened", "edited".
@@ -209,7 +233,7 @@ type IssuesEvent struct {
// LabelEvent is triggered when a repository's label is created, edited, or deleted.
// The Webhook event name is "label"
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#labelevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#labelevent
type LabelEvent struct {
// Action is the action that was performed. Possible values are:
// "created", "edited", "deleted"
@@ -226,7 +250,7 @@ type LabelEvent struct {
// MemberEvent is triggered when a user is added as a collaborator to a repository.
// The Webhook event name is "member".
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#memberevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#memberevent
type MemberEvent struct {
// Action is the action that was performed. Possible value is: "added".
Action *string `json:"action,omitempty"`
@@ -244,7 +268,7 @@ type MemberEvent struct {
// Events of this type are not visible in timelines, they are only used to
// trigger organization webhooks.
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#membershipevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#membershipevent
type MembershipEvent struct {
// Action is the action that was performed. Possible values are: "added", "removed".
Action *string `json:"action,omitempty"`
@@ -262,7 +286,7 @@ type MembershipEvent struct {
// MilestoneEvent is triggered when a milestone is created, closed, opened, edited, or deleted.
// The Webhook event name is "milestone".
//
-// Github docs: https://developer.github.com/v3/activity/events/types/#milestoneevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#milestoneevent
type MilestoneEvent struct {
// Action is the action that was performed. Possible values are:
// "created", "closed", "opened", "edited", "deleted"
@@ -281,7 +305,7 @@ type MilestoneEvent struct {
// Events of this type are not visible in timelines. These events are only used to trigger organization hooks.
// Webhook event name is "organization".
//
-// Github docs: https://developer.github.com/v3/activity/events/types/#organizationevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#organizationevent
type OrganizationEvent struct {
// Action is the action that was performed.
// Can be one of "member_added", "member_removed", or "member_invited".
@@ -308,7 +332,7 @@ type OrganizationEvent struct {
//
// Events of this type are not visible in timelines, they are only used to trigger hooks.
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#pagebuildevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#pagebuildevent
type PageBuildEvent struct {
Build *PagesBuild `json:"build,omitempty"`
@@ -321,7 +345,7 @@ type PageBuildEvent struct {
// PingEvent is triggered when a Webhook is added to GitHub.
//
-// GitHub docs: https://developer.github.com/webhooks/#ping-event
+// GitHub API docs: https://developer.github.com/webhooks/#ping-event
type PingEvent struct {
// Random string of GitHub zen.
Zen *string `json:"zen,omitempty"`
@@ -332,11 +356,61 @@ type PingEvent struct {
Installation *Installation `json:"installation,omitempty"`
}
+// ProjectEvent is triggered when project is created, modified or deleted.
+// The webhook event name is "project".
+//
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#projectevent
+type ProjectEvent struct {
+ Action *string `json:"action,omitempty"`
+ Changes *ProjectChange `json:"changes,omitempty"`
+ Project *Project `json:"project,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Repo *Repository `json:"repository,omitempty"`
+ Org *Organization `json:"organization,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// ProjectCardEvent is triggered when a project card is created, updated, moved, converted to an issue, or deleted.
+// The webhook event name is "project_card".
+//
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#projectcardevent
+type ProjectCardEvent struct {
+ Action *string `json:"action,omitempty"`
+ Changes *ProjectCardChange `json:"changes,omitempty"`
+ AfterID *int `json:"after_id,omitempty"`
+ ProjectCard *ProjectCard `json:"project_card,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Repo *Repository `json:"repository,omitempty"`
+ Org *Organization `json:"organization,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
+// ProjectColumnEvent is triggered when a project column is created, updated, moved, or deleted.
+// The webhook event name is "project_column".
+//
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#projectcolumnevent
+type ProjectColumnEvent struct {
+ Action *string `json:"action,omitempty"`
+ Changes *ProjectColumnChange `json:"changes,omitempty"`
+ AfterID *int `json:"after_id,omitempty"`
+ ProjectColumn *ProjectColumn `json:"project_column,omitempty"`
+
+ // The following fields are only populated by Webhook events.
+ Repo *Repository `json:"repository,omitempty"`
+ Org *Organization `json:"organization,omitempty"`
+ Sender *User `json:"sender,omitempty"`
+ Installation *Installation `json:"installation,omitempty"`
+}
+
// PublicEvent is triggered when a private repository is open sourced.
// According to GitHub: "Without a doubt: the best GitHub event."
// The Webhook event name is "public".
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#publicevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#publicevent
type PublicEvent struct {
// The following fields are only populated by Webhook events.
Repo *Repository `json:"repository,omitempty"`
@@ -348,7 +422,7 @@ type PublicEvent struct {
// labeled, unlabeled, opened, closed, reopened, or synchronized.
// The Webhook event name is "pull_request".
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#pullrequestevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#pullrequestevent
type PullRequestEvent struct {
// Action is the action that was performed. Possible values are: "assigned",
// "unassigned", "labeled", "unlabeled", "opened", "closed", or "reopened",
@@ -370,7 +444,7 @@ type PullRequestEvent struct {
// request.
// The Webhook event name is "pull_request_review".
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#pullrequestreviewevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#pullrequestreviewevent
type PullRequestReviewEvent struct {
// Action is always "submitted".
Action *string `json:"action,omitempty"`
@@ -391,7 +465,7 @@ type PullRequestReviewEvent struct {
// portion of the unified diff of a pull request.
// The Webhook event name is "pull_request_review_comment".
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#pullrequestreviewcommentevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#pullrequestreviewcommentevent
type PullRequestReviewCommentEvent struct {
// Action is the action that was performed on the comment.
// Possible values are: "created", "edited", "deleted".
@@ -487,6 +561,7 @@ type PushEventRepository struct {
MasterBranch *string `json:"master_branch,omitempty"`
Organization *string `json:"organization,omitempty"`
URL *string `json:"url,omitempty"`
+ ArchiveURL *string `json:"archive_url,omitempty"`
HTMLURL *string `json:"html_url,omitempty"`
StatusesURL *string `json:"statuses_url,omitempty"`
GitURL *string `json:"git_url,omitempty"`
@@ -504,7 +579,7 @@ type PushEventRepoOwner struct {
// ReleaseEvent is triggered when a release is published.
// The Webhook event name is "release".
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#releaseevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#releaseevent
type ReleaseEvent struct {
// Action is the action that was performed. Possible value is: "published".
Action *string `json:"action,omitempty"`
@@ -522,7 +597,7 @@ type ReleaseEvent struct {
// Events of this type are not visible in timelines, they are only used to
// trigger organization webhooks.
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#repositoryevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#repositoryevent
type RepositoryEvent struct {
// Action is the action that was performed. Possible values are: "created", "deleted",
// "publicized", "privatized".
@@ -541,7 +616,7 @@ type RepositoryEvent struct {
// Events of this type are not visible in timelines, they are only used to
// trigger hooks.
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#statusevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#statusevent
type StatusEvent struct {
SHA *string `json:"sha,omitempty"`
// State is the new state. Possible values are: "pending", "success", "failure", "error".
@@ -568,7 +643,7 @@ type StatusEvent struct {
// Events of this type are not visible in timelines. These events are only used
// to trigger hooks.
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#teamaddevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#teamaddevent
type TeamAddEvent struct {
Team *Team `json:"team,omitempty"`
Repo *Repository `json:"repository,omitempty"`
@@ -585,7 +660,7 @@ type TeamAddEvent struct {
// The event’s actor is the user who starred a repository, and the event’s
// repository is the repository that was starred.
//
-// GitHub docs: https://developer.github.com/v3/activity/events/types/#watchevent
+// GitHub API docs: https://developer.github.com/v3/activity/events/types/#watchevent
type WatchEvent struct {
// Action is the action that was performed. Possible value is: "started".
Action *string `json:"action,omitempty"`
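
The new Project, ProjectCard and ProjectColumn event types added above are plain JSON-tagged structs, so a webhook payload can be decoded with encoding/json directly. A minimal sketch, assuming payload holds the raw request body of a "project_card" delivery; every field is a pointer, and Changes is populated when the card was edited:

    var e github.ProjectCardEvent
    if err := json.Unmarshal(payload, &e); err != nil {
        log.Fatal(err)
    }
    if e.Action != nil {
        log.Printf("project_card action: %s", *e.Action)
    }
    if e.Changes != nil && e.Changes.Note != nil && e.Changes.Note.From != nil {
        log.Printf("previous note: %s", *e.Changes.Note.From)
    }
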
diff --git a/vendor/github.com/google/go-github/github/gen-accessors.go b/vendor/github.com/google/go-github/github/gen-accessors.go
new file mode 100644
index 0000000..131c56c
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/gen-accessors.go
@@ -0,0 +1,299 @@
+// Copyright 2017 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build ignore
+
+// gen-accessors generates accessor methods for structs with pointer fields.
+//
+// It is meant to be used by the go-github authors in conjunction with the
+// go generate tool before sending a commit to GitHub.
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "io/ioutil"
+ "log"
+ "os"
+ "sort"
+ "strings"
+ "text/template"
+ "time"
+)
+
+const (
+ fileSuffix = "-accessors.go"
+)
+
+var (
+ verbose = flag.Bool("v", false, "Print verbose log messages")
+
+ sourceTmpl = template.Must(template.New("source").Parse(source))
+
+ // blacklist lists which "struct.method" combos to not generate.
+ blacklist = map[string]bool{
+ "RepositoryContent.GetContent": true,
+ "Client.GetBaseURL": true,
+ "Client.GetUploadURL": true,
+ "ErrorResponse.GetResponse": true,
+ "RateLimitError.GetResponse": true,
+ "AbuseRateLimitError.GetResponse": true,
+ }
+)
+
+func logf(fmt string, args ...interface{}) {
+ if *verbose {
+ log.Printf(fmt, args...)
+ }
+}
+
+func main() {
+ flag.Parse()
+ fset := token.NewFileSet()
+
+ pkgs, err := parser.ParseDir(fset, ".", sourceFilter, 0)
+ if err != nil {
+ log.Fatal(err)
+ return
+ }
+
+ for pkgName, pkg := range pkgs {
+ t := &templateData{
+ filename: pkgName + fileSuffix,
+ Year: time.Now().Year(),
+ Package: pkgName,
+ Imports: map[string]string{},
+ }
+ for filename, f := range pkg.Files {
+ logf("Processing %v...", filename)
+ if err := t.processAST(f); err != nil {
+ log.Fatal(err)
+ }
+ }
+ if err := t.dump(); err != nil {
+ log.Fatal(err)
+ }
+ }
+ logf("Done.")
+}
+
+func (t *templateData) processAST(f *ast.File) error {
+ for _, decl := range f.Decls {
+ gd, ok := decl.(*ast.GenDecl)
+ if !ok {
+ continue
+ }
+ for _, spec := range gd.Specs {
+ ts, ok := spec.(*ast.TypeSpec)
+ if !ok {
+ continue
+ }
+ st, ok := ts.Type.(*ast.StructType)
+ if !ok {
+ continue
+ }
+ for _, field := range st.Fields.List {
+ se, ok := field.Type.(*ast.StarExpr)
+ if len(field.Names) == 0 || !ok {
+ continue
+ }
+
+ fieldName := field.Names[0]
+ if key := fmt.Sprintf("%v.Get%v", ts.Name, fieldName); blacklist[key] {
+ logf("Method %v blacklisted; skipping.", key)
+ continue
+ }
+
+ switch x := se.X.(type) {
+ case *ast.ArrayType:
+ t.addArrayType(x, ts.Name.String(), fieldName.String())
+ case *ast.Ident:
+ t.addIdent(x, ts.Name.String(), fieldName.String())
+ case *ast.MapType:
+ t.addMapType(x, ts.Name.String(), fieldName.String())
+ case *ast.SelectorExpr:
+ t.addSelectorExpr(x, ts.Name.String(), fieldName.String())
+ default:
+ logf("processAST: type %q, field %q, unknown %T: %+v", ts.Name, fieldName, x, x)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func sourceFilter(fi os.FileInfo) bool {
+ return !strings.HasSuffix(fi.Name(), "_test.go") && !strings.HasSuffix(fi.Name(), fileSuffix)
+}
+
+func (t *templateData) dump() error {
+ if len(t.Getters) == 0 {
+ logf("No getters for %v; skipping.", t.filename)
+ return nil
+ }
+
+ // Sort getters by ReceiverType.FieldName
+ sort.Sort(byName(t.Getters))
+
+ var buf bytes.Buffer
+ if err := sourceTmpl.Execute(&buf, t); err != nil {
+ return err
+ }
+ clean, err := format.Source(buf.Bytes())
+ if err != nil {
+ return err
+ }
+
+ logf("Writing %v...", t.filename)
+ return ioutil.WriteFile(t.filename, clean, 0644)
+}
+
+func newGetter(receiverType, fieldName, fieldType, zeroValue string) *getter {
+ return &getter{
+ sortVal: strings.ToLower(receiverType) + "." + strings.ToLower(fieldName),
+ ReceiverVar: strings.ToLower(receiverType[:1]),
+ ReceiverType: receiverType,
+ FieldName: fieldName,
+ FieldType: fieldType,
+ ZeroValue: zeroValue,
+ }
+}
+
+func (t *templateData) addArrayType(x *ast.ArrayType, receiverType, fieldName string) {
+ var eltType string
+ switch elt := x.Elt.(type) {
+ case *ast.Ident:
+ eltType = elt.String()
+ default:
+ logf("addArrayType: type %q, field %q: unknown elt type: %T %+v; skipping.", receiverType, fieldName, elt, elt)
+ return
+ }
+
+ t.Getters = append(t.Getters, newGetter(receiverType, fieldName, "[]"+eltType, "nil"))
+}
+
+func (t *templateData) addIdent(x *ast.Ident, receiverType, fieldName string) {
+ var zeroValue string
+ switch x.String() {
+ case "int":
+ zeroValue = "0"
+ case "string":
+ zeroValue = `""`
+ case "bool":
+ zeroValue = "false"
+ case "Timestamp":
+ zeroValue = "Timestamp{}"
+ default: // other structs handled by their receivers directly.
+ return
+ }
+
+ t.Getters = append(t.Getters, newGetter(receiverType, fieldName, x.String(), zeroValue))
+}
+
+func (t *templateData) addMapType(x *ast.MapType, receiverType, fieldName string) {
+ var keyType string
+ switch key := x.Key.(type) {
+ case *ast.Ident:
+ keyType = key.String()
+ default:
+ logf("addMapType: type %q, field %q: unknown key type: %T %+v; skipping.", receiverType, fieldName, key, key)
+ return
+ }
+
+ var valueType string
+ switch value := x.Value.(type) {
+ case *ast.Ident:
+ valueType = value.String()
+ default:
+ logf("addMapType: type %q, field %q: unknown value type: %T %+v; skipping.", receiverType, fieldName, value, value)
+ return
+ }
+
+ fieldType := fmt.Sprintf("map[%v]%v", keyType, valueType)
+ zeroValue := fmt.Sprintf("map[%v]%v{}", keyType, valueType)
+ t.Getters = append(t.Getters, newGetter(receiverType, fieldName, fieldType, zeroValue))
+}
+
+func (t *templateData) addSelectorExpr(x *ast.SelectorExpr, receiverType, fieldName string) {
+ if strings.ToLower(fieldName[:1]) == fieldName[:1] { // non-exported field
+ return
+ }
+
+ var xX string
+ if xx, ok := x.X.(*ast.Ident); ok {
+ xX = xx.String()
+ }
+
+ switch xX {
+ case "time", "json":
+ if xX == "json" {
+ t.Imports["encoding/json"] = "encoding/json"
+ } else {
+ t.Imports[xX] = xX
+ }
+ fieldType := fmt.Sprintf("%v.%v", xX, x.Sel.Name)
+ zeroValue := fmt.Sprintf("%v.%v{}", xX, x.Sel.Name)
+ if xX == "time" && x.Sel.Name == "Duration" {
+ zeroValue = "0"
+ }
+ t.Getters = append(t.Getters, newGetter(receiverType, fieldName, fieldType, zeroValue))
+ default:
+ logf("addSelectorExpr: xX %q, type %q, field %q: unknown x=%+v; skipping.", xX, receiverType, fieldName, x)
+ }
+}
+
+type templateData struct {
+ filename string
+ Year int
+ Package string
+ Imports map[string]string
+ Getters []*getter
+}
+
+type getter struct {
+ sortVal string // lower-case version of "ReceiverType.FieldName"
+ ReceiverVar string // the one-letter variable name to match the ReceiverType
+ ReceiverType string
+ FieldName string
+ FieldType string
+ ZeroValue string
+}
+
+type byName []*getter
+
+func (b byName) Len() int { return len(b) }
+func (b byName) Less(i, j int) bool { return b[i].sortVal < b[j].sortVal }
+func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
+
+const source = `// Copyright {{.Year}} The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by gen-accessors; DO NOT EDIT.
+
+package {{.Package}}
+{{with .Imports}}
+import (
+ {{- range . -}}
+ "{{.}}"
+ {{end -}}
+)
+{{end}}
+{{range .Getters}}
+// Get{{.FieldName}} returns the {{.FieldName}} field if it's non-nil, zero value otherwise.
+func ({{.ReceiverVar}} *{{.ReceiverType}}) Get{{.FieldName}}() {{.FieldType}} {
+ if {{.ReceiverVar}} == nil || {{.ReceiverVar}}.{{.FieldName}} == nil {
+ return {{.ZeroValue}}
+ }
+ return *{{.ReceiverVar}}.{{.FieldName}}
+}
+{{end}}
+`
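
gen-accessors.go instantiates the source template once per exported pointer field, writing nil-safe getters to a *-accessors.go file. For example, assuming the package's Gist type has a Description *string field (defined elsewhere, not in this patch), the emitted method would look like:

    // GetDescription returns the Description field if it's non-nil, zero value otherwise.
    func (g *Gist) GetDescription() string {
        if g == nil || g.Description == nil {
            return ""
        }
        return *g.Description
    }

Callers can then write gist.GetDescription() without guarding against a nil Gist or a nil Description themselves.
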
diff --git a/vendor/github.com/google/go-github/github/gists.go b/vendor/github.com/google/go-github/github/gists.go
index f727f54..e7d6586 100644
--- a/vendor/github.com/google/go-github/github/gists.go
+++ b/vendor/github.com/google/go-github/github/gists.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -58,7 +59,7 @@ type GistCommit struct {
Version *string `json:"version,omitempty"`
User *User `json:"user,omitempty"`
ChangeStatus *CommitStats `json:"change_status,omitempty"`
- CommitedAt *Timestamp `json:"commited_at,omitempty"`
+ CommittedAt *Timestamp `json:"committed_at,omitempty"`
}
func (gc GistCommit) String() string {
@@ -93,7 +94,7 @@ type GistListOptions struct {
// user.
//
// GitHub API docs: https://developer.github.com/v3/gists/#list-gists
-func (s *GistsService) List(user string, opt *GistListOptions) ([]*Gist, *Response, error) {
+func (s *GistsService) List(ctx context.Context, user string, opt *GistListOptions) ([]*Gist, *Response, error) {
var u string
if user != "" {
u = fmt.Sprintf("users/%v/gists", user)
@@ -111,7 +112,7 @@ func (s *GistsService) List(user string, opt *GistListOptions) ([]*Gist, *Respon
}
var gists []*Gist
- resp, err := s.client.Do(req, &gists)
+ resp, err := s.client.Do(ctx, req, &gists)
if err != nil {
return nil, resp, err
}
@@ -122,7 +123,7 @@ func (s *GistsService) List(user string, opt *GistListOptions) ([]*Gist, *Respon
// ListAll lists all public gists.
//
// GitHub API docs: https://developer.github.com/v3/gists/#list-gists
-func (s *GistsService) ListAll(opt *GistListOptions) ([]*Gist, *Response, error) {
+func (s *GistsService) ListAll(ctx context.Context, opt *GistListOptions) ([]*Gist, *Response, error) {
u, err := addOptions("gists/public", opt)
if err != nil {
return nil, nil, err
@@ -134,7 +135,7 @@ func (s *GistsService) ListAll(opt *GistListOptions) ([]*Gist, *Response, error)
}
var gists []*Gist
- resp, err := s.client.Do(req, &gists)
+ resp, err := s.client.Do(ctx, req, &gists)
if err != nil {
return nil, resp, err
}
@@ -145,7 +146,7 @@ func (s *GistsService) ListAll(opt *GistListOptions) ([]*Gist, *Response, error)
// ListStarred lists starred gists of authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/gists/#list-gists
-func (s *GistsService) ListStarred(opt *GistListOptions) ([]*Gist, *Response, error) {
+func (s *GistsService) ListStarred(ctx context.Context, opt *GistListOptions) ([]*Gist, *Response, error) {
u, err := addOptions("gists/starred", opt)
if err != nil {
return nil, nil, err
@@ -157,7 +158,7 @@ func (s *GistsService) ListStarred(opt *GistListOptions) ([]*Gist, *Response, er
}
var gists []*Gist
- resp, err := s.client.Do(req, &gists)
+ resp, err := s.client.Do(ctx, req, &gists)
if err != nil {
return nil, resp, err
}
@@ -168,14 +169,14 @@ func (s *GistsService) ListStarred(opt *GistListOptions) ([]*Gist, *Response, er
// Get a single gist.
//
// GitHub API docs: https://developer.github.com/v3/gists/#get-a-single-gist
-func (s *GistsService) Get(id string) (*Gist, *Response, error) {
+func (s *GistsService) Get(ctx context.Context, id string) (*Gist, *Response, error) {
u := fmt.Sprintf("gists/%v", id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
gist := new(Gist)
- resp, err := s.client.Do(req, gist)
+ resp, err := s.client.Do(ctx, req, gist)
if err != nil {
return nil, resp, err
}
@@ -186,14 +187,14 @@ func (s *GistsService) Get(id string) (*Gist, *Response, error) {
// GetRevision gets a specific revision of a gist.
//
// GitHub API docs: https://developer.github.com/v3/gists/#get-a-specific-revision-of-a-gist
-func (s *GistsService) GetRevision(id, sha string) (*Gist, *Response, error) {
+func (s *GistsService) GetRevision(ctx context.Context, id, sha string) (*Gist, *Response, error) {
u := fmt.Sprintf("gists/%v/%v", id, sha)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
gist := new(Gist)
- resp, err := s.client.Do(req, gist)
+ resp, err := s.client.Do(ctx, req, gist)
if err != nil {
return nil, resp, err
}
@@ -204,14 +205,14 @@ func (s *GistsService) GetRevision(id, sha string) (*Gist, *Response, error) {
// Create a gist for authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/gists/#create-a-gist
-func (s *GistsService) Create(gist *Gist) (*Gist, *Response, error) {
+func (s *GistsService) Create(ctx context.Context, gist *Gist) (*Gist, *Response, error) {
u := "gists"
req, err := s.client.NewRequest("POST", u, gist)
if err != nil {
return nil, nil, err
}
g := new(Gist)
- resp, err := s.client.Do(req, g)
+ resp, err := s.client.Do(ctx, req, g)
if err != nil {
return nil, resp, err
}
@@ -222,14 +223,14 @@ func (s *GistsService) Create(gist *Gist) (*Gist, *Response, error) {
// Edit a gist.
//
// GitHub API docs: https://developer.github.com/v3/gists/#edit-a-gist
-func (s *GistsService) Edit(id string, gist *Gist) (*Gist, *Response, error) {
+func (s *GistsService) Edit(ctx context.Context, id string, gist *Gist) (*Gist, *Response, error) {
u := fmt.Sprintf("gists/%v", id)
req, err := s.client.NewRequest("PATCH", u, gist)
if err != nil {
return nil, nil, err
}
g := new(Gist)
- resp, err := s.client.Do(req, g)
+ resp, err := s.client.Do(ctx, req, g)
if err != nil {
return nil, resp, err
}
@@ -239,8 +240,8 @@ func (s *GistsService) Edit(id string, gist *Gist) (*Gist, *Response, error) {
// ListCommits lists commits of a gist.
//
-// Github API docs: https://developer.github.com/v3/gists/#list-gist-commits
-func (s *GistsService) ListCommits(id string) ([]*GistCommit, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/gists/#list-gist-commits
+func (s *GistsService) ListCommits(ctx context.Context, id string) ([]*GistCommit, *Response, error) {
u := fmt.Sprintf("gists/%v/commits", id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -248,7 +249,7 @@ func (s *GistsService) ListCommits(id string) ([]*GistCommit, *Response, error)
}
var gistCommits []*GistCommit
- resp, err := s.client.Do(req, &gistCommits)
+ resp, err := s.client.Do(ctx, req, &gistCommits)
if err != nil {
return nil, resp, err
}
@@ -259,49 +260,49 @@ func (s *GistsService) ListCommits(id string) ([]*GistCommit, *Response, error)
// Delete a gist.
//
// GitHub API docs: https://developer.github.com/v3/gists/#delete-a-gist
-func (s *GistsService) Delete(id string) (*Response, error) {
+func (s *GistsService) Delete(ctx context.Context, id string) (*Response, error) {
u := fmt.Sprintf("gists/%v", id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// Star a gist on behalf of authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/gists/#star-a-gist
-func (s *GistsService) Star(id string) (*Response, error) {
+func (s *GistsService) Star(ctx context.Context, id string) (*Response, error) {
u := fmt.Sprintf("gists/%v/star", id)
req, err := s.client.NewRequest("PUT", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// Unstar a gist on a behalf of authenticated user.
//
-// Github API docs: https://developer.github.com/v3/gists/#unstar-a-gist
-func (s *GistsService) Unstar(id string) (*Response, error) {
+// GitHub API docs: https://developer.github.com/v3/gists/#unstar-a-gist
+func (s *GistsService) Unstar(ctx context.Context, id string) (*Response, error) {
u := fmt.Sprintf("gists/%v/star", id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// IsStarred checks if a gist is starred by authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/gists/#check-if-a-gist-is-starred
-func (s *GistsService) IsStarred(id string) (bool, *Response, error) {
+func (s *GistsService) IsStarred(ctx context.Context, id string) (bool, *Response, error) {
u := fmt.Sprintf("gists/%v/star", id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return false, nil, err
}
- resp, err := s.client.Do(req, nil)
+ resp, err := s.client.Do(ctx, req, nil)
starred, err := parseBoolResponse(err)
return starred, resp, err
}
@@ -309,7 +310,7 @@ func (s *GistsService) IsStarred(id string) (bool, *Response, error) {
// Fork a gist.
//
// GitHub API docs: https://developer.github.com/v3/gists/#fork-a-gist
-func (s *GistsService) Fork(id string) (*Gist, *Response, error) {
+func (s *GistsService) Fork(ctx context.Context, id string) (*Gist, *Response, error) {
u := fmt.Sprintf("gists/%v/forks", id)
req, err := s.client.NewRequest("POST", u, nil)
if err != nil {
@@ -317,7 +318,7 @@ func (s *GistsService) Fork(id string) (*Gist, *Response, error) {
}
g := new(Gist)
- resp, err := s.client.Do(req, g)
+ resp, err := s.client.Do(ctx, req, g)
if err != nil {
return nil, resp, err
}
@@ -327,8 +328,8 @@ func (s *GistsService) Fork(id string) (*Gist, *Response, error) {
// ListForks lists forks of a gist.
//
-// Github API docs: https://developer.github.com/v3/gists/#list-gist-forks
-func (s *GistsService) ListForks(id string) ([]*GistFork, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/gists/#list-gist-forks
+func (s *GistsService) ListForks(ctx context.Context, id string) ([]*GistFork, *Response, error) {
u := fmt.Sprintf("gists/%v/forks", id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -336,7 +337,7 @@ func (s *GistsService) ListForks(id string) ([]*GistFork, *Response, error) {
}
var gistForks []*GistFork
- resp, err := s.client.Do(req, &gistForks)
+ resp, err := s.client.Do(ctx, req, &gistForks)
if err != nil {
return nil, resp, err
}
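
IsStarred above issues a GET against the star endpoint and lets parseBoolResponse turn the "found / not found" outcome into a bool. A short sketch of a star-if-needed flow, assuming client and ctx as before; "deadbeef" stands in for a real gist ID:

    starred, _, err := client.Gists.IsStarred(ctx, "deadbeef")
    if err != nil {
        log.Fatal(err)
    }
    if !starred {
        if _, err := client.Gists.Star(ctx, "deadbeef"); err != nil {
            log.Fatal(err)
        }
    }
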
diff --git a/vendor/github.com/google/go-github/github/gists_comments.go b/vendor/github.com/google/go-github/github/gists_comments.go
index 84af61c..2d07223 100644
--- a/vendor/github.com/google/go-github/github/gists_comments.go
+++ b/vendor/github.com/google/go-github/github/gists_comments.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -26,7 +27,7 @@ func (g GistComment) String() string {
// ListComments lists all comments for a gist.
//
// GitHub API docs: https://developer.github.com/v3/gists/comments/#list-comments-on-a-gist
-func (s *GistsService) ListComments(gistID string, opt *ListOptions) ([]*GistComment, *Response, error) {
+func (s *GistsService) ListComments(ctx context.Context, gistID string, opt *ListOptions) ([]*GistComment, *Response, error) {
u := fmt.Sprintf("gists/%v/comments", gistID)
u, err := addOptions(u, opt)
if err != nil {
@@ -39,7 +40,7 @@ func (s *GistsService) ListComments(gistID string, opt *ListOptions) ([]*GistCom
}
var comments []*GistComment
- resp, err := s.client.Do(req, &comments)
+ resp, err := s.client.Do(ctx, req, &comments)
if err != nil {
return nil, resp, err
}
@@ -50,7 +51,7 @@ func (s *GistsService) ListComments(gistID string, opt *ListOptions) ([]*GistCom
// GetComment retrieves a single comment from a gist.
//
// GitHub API docs: https://developer.github.com/v3/gists/comments/#get-a-single-comment
-func (s *GistsService) GetComment(gistID string, commentID int) (*GistComment, *Response, error) {
+func (s *GistsService) GetComment(ctx context.Context, gistID string, commentID int) (*GistComment, *Response, error) {
u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -58,7 +59,7 @@ func (s *GistsService) GetComment(gistID string, commentID int) (*GistComment, *
}
c := new(GistComment)
- resp, err := s.client.Do(req, c)
+ resp, err := s.client.Do(ctx, req, c)
if err != nil {
return nil, resp, err
}
@@ -69,7 +70,7 @@ func (s *GistsService) GetComment(gistID string, commentID int) (*GistComment, *
// CreateComment creates a comment for a gist.
//
// GitHub API docs: https://developer.github.com/v3/gists/comments/#create-a-comment
-func (s *GistsService) CreateComment(gistID string, comment *GistComment) (*GistComment, *Response, error) {
+func (s *GistsService) CreateComment(ctx context.Context, gistID string, comment *GistComment) (*GistComment, *Response, error) {
u := fmt.Sprintf("gists/%v/comments", gistID)
req, err := s.client.NewRequest("POST", u, comment)
if err != nil {
@@ -77,7 +78,7 @@ func (s *GistsService) CreateComment(gistID string, comment *GistComment) (*Gist
}
c := new(GistComment)
- resp, err := s.client.Do(req, c)
+ resp, err := s.client.Do(ctx, req, c)
if err != nil {
return nil, resp, err
}
@@ -88,7 +89,7 @@ func (s *GistsService) CreateComment(gistID string, comment *GistComment) (*Gist
// EditComment edits an existing gist comment.
//
// GitHub API docs: https://developer.github.com/v3/gists/comments/#edit-a-comment
-func (s *GistsService) EditComment(gistID string, commentID int, comment *GistComment) (*GistComment, *Response, error) {
+func (s *GistsService) EditComment(ctx context.Context, gistID string, commentID int, comment *GistComment) (*GistComment, *Response, error) {
u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID)
req, err := s.client.NewRequest("PATCH", u, comment)
if err != nil {
@@ -96,7 +97,7 @@ func (s *GistsService) EditComment(gistID string, commentID int, comment *GistCo
}
c := new(GistComment)
- resp, err := s.client.Do(req, c)
+ resp, err := s.client.Do(ctx, req, c)
if err != nil {
return nil, resp, err
}
@@ -107,12 +108,12 @@ func (s *GistsService) EditComment(gistID string, commentID int, comment *GistCo
// DeleteComment deletes a gist comment.
//
// GitHub API docs: https://developer.github.com/v3/gists/comments/#delete-a-comment
-func (s *GistsService) DeleteComment(gistID string, commentID int) (*Response, error) {
+func (s *GistsService) DeleteComment(ctx context.Context, gistID string, commentID int) (*Response, error) {
u := fmt.Sprintf("gists/%v/comments/%v", gistID, commentID)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/git_blobs.go b/vendor/github.com/google/go-github/github/git_blobs.go
index 5a46708..67ea74a 100644
--- a/vendor/github.com/google/go-github/github/git_blobs.go
+++ b/vendor/github.com/google/go-github/github/git_blobs.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// Blob represents a blob object.
type Blob struct {
@@ -19,7 +22,7 @@ type Blob struct {
// GetBlob fetchs a blob from a repo given a SHA.
//
// GitHub API docs: https://developer.github.com/v3/git/blobs/#get-a-blob
-func (s *GitService) GetBlob(owner string, repo string, sha string) (*Blob, *Response, error) {
+func (s *GitService) GetBlob(ctx context.Context, owner string, repo string, sha string) (*Blob, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/git/blobs/%v", owner, repo, sha)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -27,14 +30,14 @@ func (s *GitService) GetBlob(owner string, repo string, sha string) (*Blob, *Res
}
blob := new(Blob)
- resp, err := s.client.Do(req, blob)
+ resp, err := s.client.Do(ctx, req, blob)
return blob, resp, err
}
// CreateBlob creates a blob object.
//
// GitHub API docs: https://developer.github.com/v3/git/blobs/#create-a-blob
-func (s *GitService) CreateBlob(owner string, repo string, blob *Blob) (*Blob, *Response, error) {
+func (s *GitService) CreateBlob(ctx context.Context, owner string, repo string, blob *Blob) (*Blob, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/git/blobs", owner, repo)
req, err := s.client.NewRequest("POST", u, blob)
if err != nil {
@@ -42,6 +45,6 @@ func (s *GitService) CreateBlob(owner string, repo string, blob *Blob) (*Blob, *
}
t := new(Blob)
- resp, err := s.client.Do(req, t)
+ resp, err := s.client.Do(ctx, req, t)
return t, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/git_commits.go b/vendor/github.com/google/go-github/github/git_commits.go
index 29e0574..3c49a8a 100644
--- a/vendor/github.com/google/go-github/github/git_commits.go
+++ b/vendor/github.com/google/go-github/github/git_commits.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -27,6 +28,7 @@ type Commit struct {
Tree *Tree `json:"tree,omitempty"`
Parents []Commit `json:"parents,omitempty"`
Stats *CommitStats `json:"stats,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
URL *string `json:"url,omitempty"`
Verification *SignatureVerification `json:"verification,omitempty"`
@@ -58,7 +60,7 @@ func (c CommitAuthor) String() string {
// GetCommit fetchs the Commit object for a given SHA.
//
// GitHub API docs: https://developer.github.com/v3/git/commits/#get-a-commit
-func (s *GitService) GetCommit(owner string, repo string, sha string) (*Commit, *Response, error) {
+func (s *GitService) GetCommit(ctx context.Context, owner string, repo string, sha string) (*Commit, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/git/commits/%v", owner, repo, sha)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -69,7 +71,7 @@ func (s *GitService) GetCommit(owner string, repo string, sha string) (*Commit,
req.Header.Set("Accept", mediaTypeGitSigningPreview)
c := new(Commit)
- resp, err := s.client.Do(req, c)
+ resp, err := s.client.Do(ctx, req, c)
if err != nil {
return nil, resp, err
}
@@ -87,29 +89,33 @@ type createCommit struct {
}
// CreateCommit creates a new commit in a repository.
+// commit must not be nil.
//
// The commit.Committer is optional and will be filled with the commit.Author
// data if omitted. If the commit.Author is omitted, it will be filled in with
// the authenticated user’s information and the current date.
//
// GitHub API docs: https://developer.github.com/v3/git/commits/#create-a-commit
-func (s *GitService) CreateCommit(owner string, repo string, commit *Commit) (*Commit, *Response, error) {
+func (s *GitService) CreateCommit(ctx context.Context, owner string, repo string, commit *Commit) (*Commit, *Response, error) {
+ if commit == nil {
+ return nil, nil, fmt.Errorf("commit must be provided")
+ }
+
u := fmt.Sprintf("repos/%v/%v/git/commits", owner, repo)
- body := &createCommit{}
- if commit != nil {
- parents := make([]string, len(commit.Parents))
- for i, parent := range commit.Parents {
- parents[i] = *parent.SHA
- }
-
- body = &createCommit{
- Author: commit.Author,
- Committer: commit.Committer,
- Message: commit.Message,
- Tree: commit.Tree.SHA,
- Parents: parents,
- }
+ parents := make([]string, len(commit.Parents))
+ for i, parent := range commit.Parents {
+ parents[i] = *parent.SHA
+ }
+
+ body := &createCommit{
+ Author: commit.Author,
+ Committer: commit.Committer,
+ Message: commit.Message,
+ Parents: parents,
+ }
+ if commit.Tree != nil {
+ body.Tree = commit.Tree.SHA
}
req, err := s.client.NewRequest("POST", u, body)
@@ -118,7 +124,7 @@ func (s *GitService) CreateCommit(owner string, repo string, commit *Commit) (*C
}
c := new(Commit)
- resp, err := s.client.Do(req, c)
+ resp, err := s.client.Do(ctx, req, c)
if err != nil {
return nil, resp, err
}
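
Also not part of the diff: continuing the sketch above, the reworked CreateCommit now rejects a nil commit outright and only sends a tree SHA when commit.Tree is set. The ctx and client come from the previous sketch; all field values are placeholders.

// Assumes the ctx and client from the previous sketch.
commit := &github.Commit{
	Message: github.String("vendor: illustrate the new signature"),
	Tree:    &github.Tree{SHA: github.String("treesha-placeholder")},
	Parents: []github.Commit{{SHA: github.String("parentsha-placeholder")}},
}
created, _, err := client.Git.CreateCommit(ctx, "octocat", "Hello-World", commit)
if err != nil {
	// Passing a nil *Commit is caught before any request is made and
	// surfaces here as "commit must be provided".
	log.Fatal(err)
}
fmt.Println(created.GetSHA())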
diff --git a/vendor/github.com/google/go-github/github/git_refs.go b/vendor/github.com/google/go-github/github/git_refs.go
index bcec615..bd5df3f 100644
--- a/vendor/github.com/google/go-github/github/git_refs.go
+++ b/vendor/github.com/google/go-github/github/git_refs.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"strings"
)
@@ -47,7 +48,7 @@ type updateRefRequest struct {
// GetRef fetches the Reference object for a given Git ref.
//
// GitHub API docs: https://developer.github.com/v3/git/refs/#get-a-reference
-func (s *GitService) GetRef(owner string, repo string, ref string) (*Reference, *Response, error) {
+func (s *GitService) GetRef(ctx context.Context, owner string, repo string, ref string) (*Reference, *Response, error) {
ref = strings.TrimPrefix(ref, "refs/")
u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, ref)
req, err := s.client.NewRequest("GET", u, nil)
@@ -56,7 +57,7 @@ func (s *GitService) GetRef(owner string, repo string, ref string) (*Reference,
}
r := new(Reference)
- resp, err := s.client.Do(req, r)
+ resp, err := s.client.Do(ctx, req, r)
if err != nil {
return nil, resp, err
}
@@ -75,7 +76,7 @@ type ReferenceListOptions struct {
// ListRefs lists all refs in a repository.
//
// GitHub API docs: https://developer.github.com/v3/git/refs/#get-all-references
-func (s *GitService) ListRefs(owner, repo string, opt *ReferenceListOptions) ([]*Reference, *Response, error) {
+func (s *GitService) ListRefs(ctx context.Context, owner, repo string, opt *ReferenceListOptions) ([]*Reference, *Response, error) {
var u string
if opt != nil && opt.Type != "" {
u = fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, opt.Type)
@@ -93,7 +94,7 @@ func (s *GitService) ListRefs(owner, repo string, opt *ReferenceListOptions) ([]
}
var rs []*Reference
- resp, err := s.client.Do(req, &rs)
+ resp, err := s.client.Do(ctx, req, &rs)
if err != nil {
return nil, resp, err
}
@@ -104,7 +105,7 @@ func (s *GitService) ListRefs(owner, repo string, opt *ReferenceListOptions) ([]
// CreateRef creates a new ref in a repository.
//
// GitHub API docs: https://developer.github.com/v3/git/refs/#create-a-reference
-func (s *GitService) CreateRef(owner string, repo string, ref *Reference) (*Reference, *Response, error) {
+func (s *GitService) CreateRef(ctx context.Context, owner string, repo string, ref *Reference) (*Reference, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/git/refs", owner, repo)
req, err := s.client.NewRequest("POST", u, &createRefRequest{
// back-compat with previous behavior that didn't require 'refs/' prefix
@@ -116,7 +117,7 @@ func (s *GitService) CreateRef(owner string, repo string, ref *Reference) (*Refe
}
r := new(Reference)
- resp, err := s.client.Do(req, r)
+ resp, err := s.client.Do(ctx, req, r)
if err != nil {
return nil, resp, err
}
@@ -127,7 +128,7 @@ func (s *GitService) CreateRef(owner string, repo string, ref *Reference) (*Refe
// UpdateRef updates an existing ref in a repository.
//
// GitHub API docs: https://developer.github.com/v3/git/refs/#update-a-reference
-func (s *GitService) UpdateRef(owner string, repo string, ref *Reference, force bool) (*Reference, *Response, error) {
+func (s *GitService) UpdateRef(ctx context.Context, owner string, repo string, ref *Reference, force bool) (*Reference, *Response, error) {
refPath := strings.TrimPrefix(*ref.Ref, "refs/")
u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, refPath)
req, err := s.client.NewRequest("PATCH", u, &updateRefRequest{
@@ -139,7 +140,7 @@ func (s *GitService) UpdateRef(owner string, repo string, ref *Reference, force
}
r := new(Reference)
- resp, err := s.client.Do(req, r)
+ resp, err := s.client.Do(ctx, req, r)
if err != nil {
return nil, resp, err
}
@@ -150,7 +151,7 @@ func (s *GitService) UpdateRef(owner string, repo string, ref *Reference, force
// DeleteRef deletes a ref from a repository.
//
// GitHub API docs: https://developer.github.com/v3/git/refs/#delete-a-reference
-func (s *GitService) DeleteRef(owner string, repo string, ref string) (*Response, error) {
+func (s *GitService) DeleteRef(ctx context.Context, owner string, repo string, ref string) (*Response, error) {
ref = strings.TrimPrefix(ref, "refs/")
u := fmt.Sprintf("repos/%v/%v/git/refs/%v", owner, repo, ref)
req, err := s.client.NewRequest("DELETE", u, nil)
@@ -158,5 +159,5 @@ func (s *GitService) DeleteRef(owner string, repo string, ref string) (*Response
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/git_tags.go b/vendor/github.com/google/go-github/github/git_tags.go
index a58858b..08df3d3 100644
--- a/vendor/github.com/google/go-github/github/git_tags.go
+++ b/vendor/github.com/google/go-github/github/git_tags.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
)
@@ -34,7 +35,7 @@ type createTagRequest struct {
// GetTag fetches a tag from a repo given a SHA.
//
// GitHub API docs: https://developer.github.com/v3/git/tags/#get-a-tag
-func (s *GitService) GetTag(owner string, repo string, sha string) (*Tag, *Response, error) {
+func (s *GitService) GetTag(ctx context.Context, owner string, repo string, sha string) (*Tag, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/git/tags/%v", owner, repo, sha)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -45,14 +46,14 @@ func (s *GitService) GetTag(owner string, repo string, sha string) (*Tag, *Respo
req.Header.Set("Accept", mediaTypeGitSigningPreview)
tag := new(Tag)
- resp, err := s.client.Do(req, tag)
+ resp, err := s.client.Do(ctx, req, tag)
return tag, resp, err
}
// CreateTag creates a tag object.
//
// GitHub API docs: https://developer.github.com/v3/git/tags/#create-a-tag-object
-func (s *GitService) CreateTag(owner string, repo string, tag *Tag) (*Tag, *Response, error) {
+func (s *GitService) CreateTag(ctx context.Context, owner string, repo string, tag *Tag) (*Tag, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/git/tags", owner, repo)
// convert Tag into a createTagRequest
@@ -72,6 +73,6 @@ func (s *GitService) CreateTag(owner string, repo string, tag *Tag) (*Tag, *Resp
}
t := new(Tag)
- resp, err := s.client.Do(req, t)
+ resp, err := s.client.Do(ctx, req, t)
return t, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/git_trees.go b/vendor/github.com/google/go-github/github/git_trees.go
index 13acfa6..bdd481f 100644
--- a/vendor/github.com/google/go-github/github/git_trees.go
+++ b/vendor/github.com/google/go-github/github/git_trees.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// Tree represents a GitHub tree.
type Tree struct {
@@ -36,7 +39,7 @@ func (t TreeEntry) String() string {
// GetTree fetches the Tree object for a given sha hash from a repository.
//
// GitHub API docs: https://developer.github.com/v3/git/trees/#get-a-tree
-func (s *GitService) GetTree(owner string, repo string, sha string, recursive bool) (*Tree, *Response, error) {
+func (s *GitService) GetTree(ctx context.Context, owner string, repo string, sha string, recursive bool) (*Tree, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/git/trees/%v", owner, repo, sha)
if recursive {
u += "?recursive=1"
@@ -48,7 +51,7 @@ func (s *GitService) GetTree(owner string, repo string, sha string, recursive bo
}
t := new(Tree)
- resp, err := s.client.Do(req, t)
+ resp, err := s.client.Do(ctx, req, t)
if err != nil {
return nil, resp, err
}
@@ -67,7 +70,7 @@ type createTree struct {
// that tree with the new path contents and write a new tree out.
//
// GitHub API docs: https://developer.github.com/v3/git/trees/#create-a-tree
-func (s *GitService) CreateTree(owner string, repo string, baseTree string, entries []TreeEntry) (*Tree, *Response, error) {
+func (s *GitService) CreateTree(ctx context.Context, owner string, repo string, baseTree string, entries []TreeEntry) (*Tree, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/git/trees", owner, repo)
body := &createTree{
@@ -80,7 +83,7 @@ func (s *GitService) CreateTree(owner string, repo string, baseTree string, entr
}
t := new(Tree)
- resp, err := s.client.Do(req, t)
+ resp, err := s.client.Do(ctx, req, t)
if err != nil {
return nil, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/github-accessors.go b/vendor/github.com/google/go-github/github/github-accessors.go
new file mode 100644
index 0000000..c74c025
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/github-accessors.go
@@ -0,0 +1,7277 @@
+// Copyright 2017 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by gen-accessors; DO NOT EDIT.
+
+package github
+
+import (
+ "encoding/json"
+ "time"
+)
+
+// GetRetryAfter returns the RetryAfter field if it's non-nil, zero value otherwise.
+func (a *AbuseRateLimitError) GetRetryAfter() time.Duration {
+ if a == nil || a.RetryAfter == nil {
+ return 0
+ }
+ return *a.RetryAfter
+}
+
+// GetVerifiablePasswordAuthentication returns the VerifiablePasswordAuthentication field if it's non-nil, zero value otherwise.
+func (a *APIMeta) GetVerifiablePasswordAuthentication() bool {
+ if a == nil || a.VerifiablePasswordAuthentication == nil {
+ return false
+ }
+ return *a.VerifiablePasswordAuthentication
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (a *Authorization) GetCreatedAt() Timestamp {
+ if a == nil || a.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *a.CreatedAt
+}
+
+// GetFingerprint returns the Fingerprint field if it's non-nil, zero value otherwise.
+func (a *Authorization) GetFingerprint() string {
+ if a == nil || a.Fingerprint == nil {
+ return ""
+ }
+ return *a.Fingerprint
+}
+
+// GetHashedToken returns the HashedToken field if it's non-nil, zero value otherwise.
+func (a *Authorization) GetHashedToken() string {
+ if a == nil || a.HashedToken == nil {
+ return ""
+ }
+ return *a.HashedToken
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (a *Authorization) GetID() int {
+ if a == nil || a.ID == nil {
+ return 0
+ }
+ return *a.ID
+}
+
+// GetNote returns the Note field if it's non-nil, zero value otherwise.
+func (a *Authorization) GetNote() string {
+ if a == nil || a.Note == nil {
+ return ""
+ }
+ return *a.Note
+}
+
+// GetNoteURL returns the NoteURL field if it's non-nil, zero value otherwise.
+func (a *Authorization) GetNoteURL() string {
+ if a == nil || a.NoteURL == nil {
+ return ""
+ }
+ return *a.NoteURL
+}
+
+// GetToken returns the Token field if it's non-nil, zero value otherwise.
+func (a *Authorization) GetToken() string {
+ if a == nil || a.Token == nil {
+ return ""
+ }
+ return *a.Token
+}
+
+// GetTokenLastEight returns the TokenLastEight field if it's non-nil, zero value otherwise.
+func (a *Authorization) GetTokenLastEight() string {
+ if a == nil || a.TokenLastEight == nil {
+ return ""
+ }
+ return *a.TokenLastEight
+}
+
+// GetUpdateAt returns the UpdateAt field if it's non-nil, zero value otherwise.
+func (a *Authorization) GetUpdateAt() Timestamp {
+ if a == nil || a.UpdateAt == nil {
+ return Timestamp{}
+ }
+ return *a.UpdateAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (a *Authorization) GetURL() string {
+ if a == nil || a.URL == nil {
+ return ""
+ }
+ return *a.URL
+}
+
+// GetClientID returns the ClientID field if it's non-nil, zero value otherwise.
+func (a *AuthorizationApp) GetClientID() string {
+ if a == nil || a.ClientID == nil {
+ return ""
+ }
+ return *a.ClientID
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (a *AuthorizationApp) GetName() string {
+ if a == nil || a.Name == nil {
+ return ""
+ }
+ return *a.Name
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (a *AuthorizationApp) GetURL() string {
+ if a == nil || a.URL == nil {
+ return ""
+ }
+ return *a.URL
+}
+
+// GetClientID returns the ClientID field if it's non-nil, zero value otherwise.
+func (a *AuthorizationRequest) GetClientID() string {
+ if a == nil || a.ClientID == nil {
+ return ""
+ }
+ return *a.ClientID
+}
+
+// GetClientSecret returns the ClientSecret field if it's non-nil, zero value otherwise.
+func (a *AuthorizationRequest) GetClientSecret() string {
+ if a == nil || a.ClientSecret == nil {
+ return ""
+ }
+ return *a.ClientSecret
+}
+
+// GetFingerprint returns the Fingerprint field if it's non-nil, zero value otherwise.
+func (a *AuthorizationRequest) GetFingerprint() string {
+ if a == nil || a.Fingerprint == nil {
+ return ""
+ }
+ return *a.Fingerprint
+}
+
+// GetNote returns the Note field if it's non-nil, zero value otherwise.
+func (a *AuthorizationRequest) GetNote() string {
+ if a == nil || a.Note == nil {
+ return ""
+ }
+ return *a.Note
+}
+
+// GetNoteURL returns the NoteURL field if it's non-nil, zero value otherwise.
+func (a *AuthorizationRequest) GetNoteURL() string {
+ if a == nil || a.NoteURL == nil {
+ return ""
+ }
+ return *a.NoteURL
+}
+
+// GetFingerprint returns the Fingerprint field if it's non-nil, zero value otherwise.
+func (a *AuthorizationUpdateRequest) GetFingerprint() string {
+ if a == nil || a.Fingerprint == nil {
+ return ""
+ }
+ return *a.Fingerprint
+}
+
+// GetNote returns the Note field if it's non-nil, zero value otherwise.
+func (a *AuthorizationUpdateRequest) GetNote() string {
+ if a == nil || a.Note == nil {
+ return ""
+ }
+ return *a.Note
+}
+
+// GetNoteURL returns the NoteURL field if it's non-nil, zero value otherwise.
+func (a *AuthorizationUpdateRequest) GetNoteURL() string {
+ if a == nil || a.NoteURL == nil {
+ return ""
+ }
+ return *a.NoteURL
+}
+
+// GetContent returns the Content field if it's non-nil, zero value otherwise.
+func (b *Blob) GetContent() string {
+ if b == nil || b.Content == nil {
+ return ""
+ }
+ return *b.Content
+}
+
+// GetEncoding returns the Encoding field if it's non-nil, zero value otherwise.
+func (b *Blob) GetEncoding() string {
+ if b == nil || b.Encoding == nil {
+ return ""
+ }
+ return *b.Encoding
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (b *Blob) GetSHA() string {
+ if b == nil || b.SHA == nil {
+ return ""
+ }
+ return *b.SHA
+}
+
+// GetSize returns the Size field if it's non-nil, zero value otherwise.
+func (b *Blob) GetSize() int {
+ if b == nil || b.Size == nil {
+ return 0
+ }
+ return *b.Size
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (b *Blob) GetURL() string {
+ if b == nil || b.URL == nil {
+ return ""
+ }
+ return *b.URL
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (b *Branch) GetName() string {
+ if b == nil || b.Name == nil {
+ return ""
+ }
+ return *b.Name
+}
+
+// GetProtected returns the Protected field if it's non-nil, zero value otherwise.
+func (b *Branch) GetProtected() bool {
+ if b == nil || b.Protected == nil {
+ return false
+ }
+ return *b.Protected
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (c *CodeResult) GetHTMLURL() string {
+ if c == nil || c.HTMLURL == nil {
+ return ""
+ }
+ return *c.HTMLURL
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (c *CodeResult) GetName() string {
+ if c == nil || c.Name == nil {
+ return ""
+ }
+ return *c.Name
+}
+
+// GetPath returns the Path field if it's non-nil, zero value otherwise.
+func (c *CodeResult) GetPath() string {
+ if c == nil || c.Path == nil {
+ return ""
+ }
+ return *c.Path
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (c *CodeResult) GetSHA() string {
+ if c == nil || c.SHA == nil {
+ return ""
+ }
+ return *c.SHA
+}
+
+// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise.
+func (c *CodeSearchResult) GetIncompleteResults() bool {
+ if c == nil || c.IncompleteResults == nil {
+ return false
+ }
+ return *c.IncompleteResults
+}
+
+// GetTotal returns the Total field if it's non-nil, zero value otherwise.
+func (c *CodeSearchResult) GetTotal() int {
+ if c == nil || c.Total == nil {
+ return 0
+ }
+ return *c.Total
+}
+
+// GetCommitURL returns the CommitURL field if it's non-nil, zero value otherwise.
+func (c *CombinedStatus) GetCommitURL() string {
+ if c == nil || c.CommitURL == nil {
+ return ""
+ }
+ return *c.CommitURL
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (c *CombinedStatus) GetName() string {
+ if c == nil || c.Name == nil {
+ return ""
+ }
+ return *c.Name
+}
+
+// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise.
+func (c *CombinedStatus) GetRepositoryURL() string {
+ if c == nil || c.RepositoryURL == nil {
+ return ""
+ }
+ return *c.RepositoryURL
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (c *CombinedStatus) GetSHA() string {
+ if c == nil || c.SHA == nil {
+ return ""
+ }
+ return *c.SHA
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (c *CombinedStatus) GetState() string {
+ if c == nil || c.State == nil {
+ return ""
+ }
+ return *c.State
+}
+
+// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise.
+func (c *CombinedStatus) GetTotalCount() int {
+ if c == nil || c.TotalCount == nil {
+ return 0
+ }
+ return *c.TotalCount
+}
+
+// GetCommentCount returns the CommentCount field if it's non-nil, zero value otherwise.
+func (c *Commit) GetCommentCount() int {
+ if c == nil || c.CommentCount == nil {
+ return 0
+ }
+ return *c.CommentCount
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (c *Commit) GetHTMLURL() string {
+ if c == nil || c.HTMLURL == nil {
+ return ""
+ }
+ return *c.HTMLURL
+}
+
+// GetMessage returns the Message field if it's non-nil, zero value otherwise.
+func (c *Commit) GetMessage() string {
+ if c == nil || c.Message == nil {
+ return ""
+ }
+ return *c.Message
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (c *Commit) GetSHA() string {
+ if c == nil || c.SHA == nil {
+ return ""
+ }
+ return *c.SHA
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (c *Commit) GetURL() string {
+ if c == nil || c.URL == nil {
+ return ""
+ }
+ return *c.URL
+}
+
+// GetDate returns the Date field if it's non-nil, zero value otherwise.
+func (c *CommitAuthor) GetDate() time.Time {
+ if c == nil || c.Date == nil {
+ return time.Time{}
+ }
+ return *c.Date
+}
+
+// GetEmail returns the Email field if it's non-nil, zero value otherwise.
+func (c *CommitAuthor) GetEmail() string {
+ if c == nil || c.Email == nil {
+ return ""
+ }
+ return *c.Email
+}
+
+// GetLogin returns the Login field if it's non-nil, zero value otherwise.
+func (c *CommitAuthor) GetLogin() string {
+ if c == nil || c.Login == nil {
+ return ""
+ }
+ return *c.Login
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (c *CommitAuthor) GetName() string {
+ if c == nil || c.Name == nil {
+ return ""
+ }
+ return *c.Name
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (c *CommitCommentEvent) GetAction() string {
+ if c == nil || c.Action == nil {
+ return ""
+ }
+ return *c.Action
+}
+
+// GetAdditions returns the Additions field if it's non-nil, zero value otherwise.
+func (c *CommitFile) GetAdditions() int {
+ if c == nil || c.Additions == nil {
+ return 0
+ }
+ return *c.Additions
+}
+
+// GetBlobURL returns the BlobURL field if it's non-nil, zero value otherwise.
+func (c *CommitFile) GetBlobURL() string {
+ if c == nil || c.BlobURL == nil {
+ return ""
+ }
+ return *c.BlobURL
+}
+
+// GetChanges returns the Changes field if it's non-nil, zero value otherwise.
+func (c *CommitFile) GetChanges() int {
+ if c == nil || c.Changes == nil {
+ return 0
+ }
+ return *c.Changes
+}
+
+// GetContentsURL returns the ContentsURL field if it's non-nil, zero value otherwise.
+func (c *CommitFile) GetContentsURL() string {
+ if c == nil || c.ContentsURL == nil {
+ return ""
+ }
+ return *c.ContentsURL
+}
+
+// GetDeletions returns the Deletions field if it's non-nil, zero value otherwise.
+func (c *CommitFile) GetDeletions() int {
+ if c == nil || c.Deletions == nil {
+ return 0
+ }
+ return *c.Deletions
+}
+
+// GetFilename returns the Filename field if it's non-nil, zero value otherwise.
+func (c *CommitFile) GetFilename() string {
+ if c == nil || c.Filename == nil {
+ return ""
+ }
+ return *c.Filename
+}
+
+// GetPatch returns the Patch field if it's non-nil, zero value otherwise.
+func (c *CommitFile) GetPatch() string {
+ if c == nil || c.Patch == nil {
+ return ""
+ }
+ return *c.Patch
+}
+
+// GetRawURL returns the RawURL field if it's non-nil, zero value otherwise.
+func (c *CommitFile) GetRawURL() string {
+ if c == nil || c.RawURL == nil {
+ return ""
+ }
+ return *c.RawURL
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (c *CommitFile) GetSHA() string {
+ if c == nil || c.SHA == nil {
+ return ""
+ }
+ return *c.SHA
+}
+
+// GetStatus returns the Status field if it's non-nil, zero value otherwise.
+func (c *CommitFile) GetStatus() string {
+ if c == nil || c.Status == nil {
+ return ""
+ }
+ return *c.Status
+}
+
+// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise.
+func (c *CommitResult) GetCommentsURL() string {
+ if c == nil || c.CommentsURL == nil {
+ return ""
+ }
+ return *c.CommentsURL
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (c *CommitResult) GetHTMLURL() string {
+ if c == nil || c.HTMLURL == nil {
+ return ""
+ }
+ return *c.HTMLURL
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (c *CommitResult) GetSHA() string {
+ if c == nil || c.SHA == nil {
+ return ""
+ }
+ return *c.SHA
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (c *CommitResult) GetURL() string {
+ if c == nil || c.URL == nil {
+ return ""
+ }
+ return *c.URL
+}
+
+// GetAheadBy returns the AheadBy field if it's non-nil, zero value otherwise.
+func (c *CommitsComparison) GetAheadBy() int {
+ if c == nil || c.AheadBy == nil {
+ return 0
+ }
+ return *c.AheadBy
+}
+
+// GetBehindBy returns the BehindBy field if it's non-nil, zero value otherwise.
+func (c *CommitsComparison) GetBehindBy() int {
+ if c == nil || c.BehindBy == nil {
+ return 0
+ }
+ return *c.BehindBy
+}
+
+// GetStatus returns the Status field if it's non-nil, zero value otherwise.
+func (c *CommitsComparison) GetStatus() string {
+ if c == nil || c.Status == nil {
+ return ""
+ }
+ return *c.Status
+}
+
+// GetTotalCommits returns the TotalCommits field if it's non-nil, zero value otherwise.
+func (c *CommitsComparison) GetTotalCommits() int {
+ if c == nil || c.TotalCommits == nil {
+ return 0
+ }
+ return *c.TotalCommits
+}
+
+// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise.
+func (c *CommitsSearchResult) GetIncompleteResults() bool {
+ if c == nil || c.IncompleteResults == nil {
+ return false
+ }
+ return *c.IncompleteResults
+}
+
+// GetTotal returns the Total field if it's non-nil, zero value otherwise.
+func (c *CommitsSearchResult) GetTotal() int {
+ if c == nil || c.Total == nil {
+ return 0
+ }
+ return *c.Total
+}
+
+// GetAdditions returns the Additions field if it's non-nil, zero value otherwise.
+func (c *CommitStats) GetAdditions() int {
+ if c == nil || c.Additions == nil {
+ return 0
+ }
+ return *c.Additions
+}
+
+// GetDeletions returns the Deletions field if it's non-nil, zero value otherwise.
+func (c *CommitStats) GetDeletions() int {
+ if c == nil || c.Deletions == nil {
+ return 0
+ }
+ return *c.Deletions
+}
+
+// GetTotal returns the Total field if it's non-nil, zero value otherwise.
+func (c *CommitStats) GetTotal() int {
+ if c == nil || c.Total == nil {
+ return 0
+ }
+ return *c.Total
+}
+
+// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetAvatarURL() string {
+ if c == nil || c.AvatarURL == nil {
+ return ""
+ }
+ return *c.AvatarURL
+}
+
+// GetContributions returns the Contributions field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetContributions() int {
+ if c == nil || c.Contributions == nil {
+ return 0
+ }
+ return *c.Contributions
+}
+
+// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetEventsURL() string {
+ if c == nil || c.EventsURL == nil {
+ return ""
+ }
+ return *c.EventsURL
+}
+
+// GetFollowersURL returns the FollowersURL field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetFollowersURL() string {
+ if c == nil || c.FollowersURL == nil {
+ return ""
+ }
+ return *c.FollowersURL
+}
+
+// GetFollowingURL returns the FollowingURL field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetFollowingURL() string {
+ if c == nil || c.FollowingURL == nil {
+ return ""
+ }
+ return *c.FollowingURL
+}
+
+// GetGistsURL returns the GistsURL field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetGistsURL() string {
+ if c == nil || c.GistsURL == nil {
+ return ""
+ }
+ return *c.GistsURL
+}
+
+// GetGravatarID returns the GravatarID field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetGravatarID() string {
+ if c == nil || c.GravatarID == nil {
+ return ""
+ }
+ return *c.GravatarID
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetHTMLURL() string {
+ if c == nil || c.HTMLURL == nil {
+ return ""
+ }
+ return *c.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetID() int {
+ if c == nil || c.ID == nil {
+ return 0
+ }
+ return *c.ID
+}
+
+// GetLogin returns the Login field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetLogin() string {
+ if c == nil || c.Login == nil {
+ return ""
+ }
+ return *c.Login
+}
+
+// GetOrganizationsURL returns the OrganizationsURL field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetOrganizationsURL() string {
+ if c == nil || c.OrganizationsURL == nil {
+ return ""
+ }
+ return *c.OrganizationsURL
+}
+
+// GetReceivedEventsURL returns the ReceivedEventsURL field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetReceivedEventsURL() string {
+ if c == nil || c.ReceivedEventsURL == nil {
+ return ""
+ }
+ return *c.ReceivedEventsURL
+}
+
+// GetReposURL returns the ReposURL field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetReposURL() string {
+ if c == nil || c.ReposURL == nil {
+ return ""
+ }
+ return *c.ReposURL
+}
+
+// GetSiteAdmin returns the SiteAdmin field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetSiteAdmin() bool {
+ if c == nil || c.SiteAdmin == nil {
+ return false
+ }
+ return *c.SiteAdmin
+}
+
+// GetStarredURL returns the StarredURL field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetStarredURL() string {
+ if c == nil || c.StarredURL == nil {
+ return ""
+ }
+ return *c.StarredURL
+}
+
+// GetSubscriptionsURL returns the SubscriptionsURL field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetSubscriptionsURL() string {
+ if c == nil || c.SubscriptionsURL == nil {
+ return ""
+ }
+ return *c.SubscriptionsURL
+}
+
+// GetType returns the Type field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetType() string {
+ if c == nil || c.Type == nil {
+ return ""
+ }
+ return *c.Type
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (c *Contributor) GetURL() string {
+ if c == nil || c.URL == nil {
+ return ""
+ }
+ return *c.URL
+}
+
+// GetTotal returns the Total field if it's non-nil, zero value otherwise.
+func (c *ContributorStats) GetTotal() int {
+ if c == nil || c.Total == nil {
+ return 0
+ }
+ return *c.Total
+}
+
+// GetMessage returns the Message field if it's non-nil, zero value otherwise.
+func (c *createCommit) GetMessage() string {
+ if c == nil || c.Message == nil {
+ return ""
+ }
+ return *c.Message
+}
+
+// GetTree returns the Tree field if it's non-nil, zero value otherwise.
+func (c *createCommit) GetTree() string {
+ if c == nil || c.Tree == nil {
+ return ""
+ }
+ return *c.Tree
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (c *CreateEvent) GetDescription() string {
+ if c == nil || c.Description == nil {
+ return ""
+ }
+ return *c.Description
+}
+
+// GetMasterBranch returns the MasterBranch field if it's non-nil, zero value otherwise.
+func (c *CreateEvent) GetMasterBranch() string {
+ if c == nil || c.MasterBranch == nil {
+ return ""
+ }
+ return *c.MasterBranch
+}
+
+// GetPusherType returns the PusherType field if it's non-nil, zero value otherwise.
+func (c *CreateEvent) GetPusherType() string {
+ if c == nil || c.PusherType == nil {
+ return ""
+ }
+ return *c.PusherType
+}
+
+// GetRef returns the Ref field if it's non-nil, zero value otherwise.
+func (c *CreateEvent) GetRef() string {
+ if c == nil || c.Ref == nil {
+ return ""
+ }
+ return *c.Ref
+}
+
+// GetRefType returns the RefType field if it's non-nil, zero value otherwise.
+func (c *CreateEvent) GetRefType() string {
+ if c == nil || c.RefType == nil {
+ return ""
+ }
+ return *c.RefType
+}
+
+// GetRef returns the Ref field if it's non-nil, zero value otherwise.
+func (c *createRefRequest) GetRef() string {
+ if c == nil || c.Ref == nil {
+ return ""
+ }
+ return *c.Ref
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (c *createRefRequest) GetSHA() string {
+ if c == nil || c.SHA == nil {
+ return ""
+ }
+ return *c.SHA
+}
+
+// GetMessage returns the Message field if it's non-nil, zero value otherwise.
+func (c *createTagRequest) GetMessage() string {
+ if c == nil || c.Message == nil {
+ return ""
+ }
+ return *c.Message
+}
+
+// GetObject returns the Object field if it's non-nil, zero value otherwise.
+func (c *createTagRequest) GetObject() string {
+ if c == nil || c.Object == nil {
+ return ""
+ }
+ return *c.Object
+}
+
+// GetTag returns the Tag field if it's non-nil, zero value otherwise.
+func (c *createTagRequest) GetTag() string {
+ if c == nil || c.Tag == nil {
+ return ""
+ }
+ return *c.Tag
+}
+
+// GetType returns the Type field if it's non-nil, zero value otherwise.
+func (c *createTagRequest) GetType() string {
+ if c == nil || c.Type == nil {
+ return ""
+ }
+ return *c.Type
+}
+
+// GetPusherType returns the PusherType field if it's non-nil, zero value otherwise.
+func (d *DeleteEvent) GetPusherType() string {
+ if d == nil || d.PusherType == nil {
+ return ""
+ }
+ return *d.PusherType
+}
+
+// GetRef returns the Ref field if it's non-nil, zero value otherwise.
+func (d *DeleteEvent) GetRef() string {
+ if d == nil || d.Ref == nil {
+ return ""
+ }
+ return *d.Ref
+}
+
+// GetRefType returns the RefType field if it's non-nil, zero value otherwise.
+func (d *DeleteEvent) GetRefType() string {
+ if d == nil || d.RefType == nil {
+ return ""
+ }
+ return *d.RefType
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (d *Deployment) GetCreatedAt() Timestamp {
+ if d == nil || d.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *d.CreatedAt
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (d *Deployment) GetDescription() string {
+ if d == nil || d.Description == nil {
+ return ""
+ }
+ return *d.Description
+}
+
+// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise.
+func (d *Deployment) GetEnvironment() string {
+ if d == nil || d.Environment == nil {
+ return ""
+ }
+ return *d.Environment
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (d *Deployment) GetID() int {
+ if d == nil || d.ID == nil {
+ return 0
+ }
+ return *d.ID
+}
+
+// GetRef returns the Ref field if it's non-nil, zero value otherwise.
+func (d *Deployment) GetRef() string {
+ if d == nil || d.Ref == nil {
+ return ""
+ }
+ return *d.Ref
+}
+
+// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise.
+func (d *Deployment) GetRepositoryURL() string {
+ if d == nil || d.RepositoryURL == nil {
+ return ""
+ }
+ return *d.RepositoryURL
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (d *Deployment) GetSHA() string {
+ if d == nil || d.SHA == nil {
+ return ""
+ }
+ return *d.SHA
+}
+
+// GetStatusesURL returns the StatusesURL field if it's non-nil, zero value otherwise.
+func (d *Deployment) GetStatusesURL() string {
+ if d == nil || d.StatusesURL == nil {
+ return ""
+ }
+ return *d.StatusesURL
+}
+
+// GetTask returns the Task field if it's non-nil, zero value otherwise.
+func (d *Deployment) GetTask() string {
+ if d == nil || d.Task == nil {
+ return ""
+ }
+ return *d.Task
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (d *Deployment) GetUpdatedAt() Timestamp {
+ if d == nil || d.UpdatedAt == nil {
+ return Timestamp{}
+ }
+ return *d.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (d *Deployment) GetURL() string {
+ if d == nil || d.URL == nil {
+ return ""
+ }
+ return *d.URL
+}
+
+// GetAutoMerge returns the AutoMerge field if it's non-nil, zero value otherwise.
+func (d *DeploymentRequest) GetAutoMerge() bool {
+ if d == nil || d.AutoMerge == nil {
+ return false
+ }
+ return *d.AutoMerge
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (d *DeploymentRequest) GetDescription() string {
+ if d == nil || d.Description == nil {
+ return ""
+ }
+ return *d.Description
+}
+
+// GetEnvironment returns the Environment field if it's non-nil, zero value otherwise.
+func (d *DeploymentRequest) GetEnvironment() string {
+ if d == nil || d.Environment == nil {
+ return ""
+ }
+ return *d.Environment
+}
+
+// GetPayload returns the Payload field if it's non-nil, zero value otherwise.
+func (d *DeploymentRequest) GetPayload() string {
+ if d == nil || d.Payload == nil {
+ return ""
+ }
+ return *d.Payload
+}
+
+// GetProductionEnvironment returns the ProductionEnvironment field if it's non-nil, zero value otherwise.
+func (d *DeploymentRequest) GetProductionEnvironment() bool {
+ if d == nil || d.ProductionEnvironment == nil {
+ return false
+ }
+ return *d.ProductionEnvironment
+}
+
+// GetRef returns the Ref field if it's non-nil, zero value otherwise.
+func (d *DeploymentRequest) GetRef() string {
+ if d == nil || d.Ref == nil {
+ return ""
+ }
+ return *d.Ref
+}
+
+// GetRequiredContexts returns the RequiredContexts field if it's non-nil, zero value otherwise.
+func (d *DeploymentRequest) GetRequiredContexts() []string {
+ if d == nil || d.RequiredContexts == nil {
+ return nil
+ }
+ return *d.RequiredContexts
+}
+
+// GetTask returns the Task field if it's non-nil, zero value otherwise.
+func (d *DeploymentRequest) GetTask() string {
+ if d == nil || d.Task == nil {
+ return ""
+ }
+ return *d.Task
+}
+
+// GetTransientEnvironment returns the TransientEnvironment field if it's non-nil, zero value otherwise.
+func (d *DeploymentRequest) GetTransientEnvironment() bool {
+ if d == nil || d.TransientEnvironment == nil {
+ return false
+ }
+ return *d.TransientEnvironment
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (d *DeploymentStatus) GetCreatedAt() Timestamp {
+ if d == nil || d.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *d.CreatedAt
+}
+
+// GetDeploymentURL returns the DeploymentURL field if it's non-nil, zero value otherwise.
+func (d *DeploymentStatus) GetDeploymentURL() string {
+ if d == nil || d.DeploymentURL == nil {
+ return ""
+ }
+ return *d.DeploymentURL
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (d *DeploymentStatus) GetDescription() string {
+ if d == nil || d.Description == nil {
+ return ""
+ }
+ return *d.Description
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (d *DeploymentStatus) GetID() int {
+ if d == nil || d.ID == nil {
+ return 0
+ }
+ return *d.ID
+}
+
+// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise.
+func (d *DeploymentStatus) GetRepositoryURL() string {
+ if d == nil || d.RepositoryURL == nil {
+ return ""
+ }
+ return *d.RepositoryURL
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (d *DeploymentStatus) GetState() string {
+ if d == nil || d.State == nil {
+ return ""
+ }
+ return *d.State
+}
+
+// GetTargetURL returns the TargetURL field if it's non-nil, zero value otherwise.
+func (d *DeploymentStatus) GetTargetURL() string {
+ if d == nil || d.TargetURL == nil {
+ return ""
+ }
+ return *d.TargetURL
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (d *DeploymentStatus) GetUpdatedAt() Timestamp {
+ if d == nil || d.UpdatedAt == nil {
+ return Timestamp{}
+ }
+ return *d.UpdatedAt
+}
+
+// GetAutoInactive returns the AutoInactive field if it's non-nil, zero value otherwise.
+func (d *DeploymentStatusRequest) GetAutoInactive() bool {
+ if d == nil || d.AutoInactive == nil {
+ return false
+ }
+ return *d.AutoInactive
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (d *DeploymentStatusRequest) GetDescription() string {
+ if d == nil || d.Description == nil {
+ return ""
+ }
+ return *d.Description
+}
+
+// GetEnvironmentURL returns the EnvironmentURL field if it's non-nil, zero value otherwise.
+func (d *DeploymentStatusRequest) GetEnvironmentURL() string {
+ if d == nil || d.EnvironmentURL == nil {
+ return ""
+ }
+ return *d.EnvironmentURL
+}
+
+// GetLogURL returns the LogURL field if it's non-nil, zero value otherwise.
+func (d *DeploymentStatusRequest) GetLogURL() string {
+ if d == nil || d.LogURL == nil {
+ return ""
+ }
+ return *d.LogURL
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (d *DeploymentStatusRequest) GetState() string {
+ if d == nil || d.State == nil {
+ return ""
+ }
+ return *d.State
+}
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (d *DraftReviewComment) GetBody() string {
+ if d == nil || d.Body == nil {
+ return ""
+ }
+ return *d.Body
+}
+
+// GetPath returns the Path field if it's non-nil, zero value otherwise.
+func (d *DraftReviewComment) GetPath() string {
+ if d == nil || d.Path == nil {
+ return ""
+ }
+ return *d.Path
+}
+
+// GetPosition returns the Position field if it's non-nil, zero value otherwise.
+func (d *DraftReviewComment) GetPosition() int {
+ if d == nil || d.Position == nil {
+ return 0
+ }
+ return *d.Position
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (e *Event) GetCreatedAt() time.Time {
+ if e == nil || e.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *e.CreatedAt
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (e *Event) GetID() string {
+ if e == nil || e.ID == nil {
+ return ""
+ }
+ return *e.ID
+}
+
+// GetPublic returns the Public field if it's non-nil, zero value otherwise.
+func (e *Event) GetPublic() bool {
+ if e == nil || e.Public == nil {
+ return false
+ }
+ return *e.Public
+}
+
+// GetRawPayload returns the RawPayload field if it's non-nil, zero value otherwise.
+func (e *Event) GetRawPayload() json.RawMessage {
+ if e == nil || e.RawPayload == nil {
+ return json.RawMessage{}
+ }
+ return *e.RawPayload
+}
+
+// GetType returns the Type field if it's non-nil, zero value otherwise.
+func (e *Event) GetType() string {
+ if e == nil || e.Type == nil {
+ return ""
+ }
+ return *e.Type
+}
+
+// GetHRef returns the HRef field if it's non-nil, zero value otherwise.
+func (f *FeedLink) GetHRef() string {
+ if f == nil || f.HRef == nil {
+ return ""
+ }
+ return *f.HRef
+}
+
+// GetType returns the Type field if it's non-nil, zero value otherwise.
+func (f *FeedLink) GetType() string {
+ if f == nil || f.Type == nil {
+ return ""
+ }
+ return *f.Type
+}
+
+// GetCurrentUserActorURL returns the CurrentUserActorURL field if it's non-nil, zero value otherwise.
+func (f *Feeds) GetCurrentUserActorURL() string {
+ if f == nil || f.CurrentUserActorURL == nil {
+ return ""
+ }
+ return *f.CurrentUserActorURL
+}
+
+// GetCurrentUserOrganizationURL returns the CurrentUserOrganizationURL field if it's non-nil, zero value otherwise.
+func (f *Feeds) GetCurrentUserOrganizationURL() string {
+ if f == nil || f.CurrentUserOrganizationURL == nil {
+ return ""
+ }
+ return *f.CurrentUserOrganizationURL
+}
+
+// GetCurrentUserPublicURL returns the CurrentUserPublicURL field if it's non-nil, zero value otherwise.
+func (f *Feeds) GetCurrentUserPublicURL() string {
+ if f == nil || f.CurrentUserPublicURL == nil {
+ return ""
+ }
+ return *f.CurrentUserPublicURL
+}
+
+// GetCurrentUserURL returns the CurrentUserURL field if it's non-nil, zero value otherwise.
+func (f *Feeds) GetCurrentUserURL() string {
+ if f == nil || f.CurrentUserURL == nil {
+ return ""
+ }
+ return *f.CurrentUserURL
+}
+
+// GetTimelineURL returns the TimelineURL field if it's non-nil, zero value otherwise.
+func (f *Feeds) GetTimelineURL() string {
+ if f == nil || f.TimelineURL == nil {
+ return ""
+ }
+ return *f.TimelineURL
+}
+
+// GetUserURL returns the UserURL field if it's non-nil, zero value otherwise.
+func (f *Feeds) GetUserURL() string {
+ if f == nil || f.UserURL == nil {
+ return ""
+ }
+ return *f.UserURL
+}
+
+// GetComments returns the Comments field if it's non-nil, zero value otherwise.
+func (g *Gist) GetComments() int {
+ if g == nil || g.Comments == nil {
+ return 0
+ }
+ return *g.Comments
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (g *Gist) GetCreatedAt() time.Time {
+ if g == nil || g.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *g.CreatedAt
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (g *Gist) GetDescription() string {
+ if g == nil || g.Description == nil {
+ return ""
+ }
+ return *g.Description
+}
+
+// GetGitPullURL returns the GitPullURL field if it's non-nil, zero value otherwise.
+func (g *Gist) GetGitPullURL() string {
+ if g == nil || g.GitPullURL == nil {
+ return ""
+ }
+ return *g.GitPullURL
+}
+
+// GetGitPushURL returns the GitPushURL field if it's non-nil, zero value otherwise.
+func (g *Gist) GetGitPushURL() string {
+ if g == nil || g.GitPushURL == nil {
+ return ""
+ }
+ return *g.GitPushURL
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (g *Gist) GetHTMLURL() string {
+ if g == nil || g.HTMLURL == nil {
+ return ""
+ }
+ return *g.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (g *Gist) GetID() string {
+ if g == nil || g.ID == nil {
+ return ""
+ }
+ return *g.ID
+}
+
+// GetPublic returns the Public field if it's non-nil, zero value otherwise.
+func (g *Gist) GetPublic() bool {
+ if g == nil || g.Public == nil {
+ return false
+ }
+ return *g.Public
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (g *Gist) GetUpdatedAt() time.Time {
+ if g == nil || g.UpdatedAt == nil {
+ return time.Time{}
+ }
+ return *g.UpdatedAt
+}
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (g *GistComment) GetBody() string {
+ if g == nil || g.Body == nil {
+ return ""
+ }
+ return *g.Body
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (g *GistComment) GetCreatedAt() time.Time {
+ if g == nil || g.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *g.CreatedAt
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (g *GistComment) GetID() int {
+ if g == nil || g.ID == nil {
+ return 0
+ }
+ return *g.ID
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (g *GistComment) GetURL() string {
+ if g == nil || g.URL == nil {
+ return ""
+ }
+ return *g.URL
+}
+
+// GetCommittedAt returns the CommittedAt field if it's non-nil, zero value otherwise.
+func (g *GistCommit) GetCommittedAt() Timestamp {
+ if g == nil || g.CommittedAt == nil {
+ return Timestamp{}
+ }
+ return *g.CommittedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (g *GistCommit) GetURL() string {
+ if g == nil || g.URL == nil {
+ return ""
+ }
+ return *g.URL
+}
+
+// GetVersion returns the Version field if it's non-nil, zero value otherwise.
+func (g *GistCommit) GetVersion() string {
+ if g == nil || g.Version == nil {
+ return ""
+ }
+ return *g.Version
+}
+
+// GetContent returns the Content field if it's non-nil, zero value otherwise.
+func (g *GistFile) GetContent() string {
+ if g == nil || g.Content == nil {
+ return ""
+ }
+ return *g.Content
+}
+
+// GetFilename returns the Filename field if it's non-nil, zero value otherwise.
+func (g *GistFile) GetFilename() string {
+ if g == nil || g.Filename == nil {
+ return ""
+ }
+ return *g.Filename
+}
+
+// GetLanguage returns the Language field if it's non-nil, zero value otherwise.
+func (g *GistFile) GetLanguage() string {
+ if g == nil || g.Language == nil {
+ return ""
+ }
+ return *g.Language
+}
+
+// GetRawURL returns the RawURL field if it's non-nil, zero value otherwise.
+func (g *GistFile) GetRawURL() string {
+ if g == nil || g.RawURL == nil {
+ return ""
+ }
+ return *g.RawURL
+}
+
+// GetSize returns the Size field if it's non-nil, zero value otherwise.
+func (g *GistFile) GetSize() int {
+ if g == nil || g.Size == nil {
+ return 0
+ }
+ return *g.Size
+}
+
+// GetType returns the Type field if it's non-nil, zero value otherwise.
+func (g *GistFile) GetType() string {
+ if g == nil || g.Type == nil {
+ return ""
+ }
+ return *g.Type
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (g *GistFork) GetCreatedAt() Timestamp {
+ if g == nil || g.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *g.CreatedAt
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (g *GistFork) GetID() string {
+ if g == nil || g.ID == nil {
+ return ""
+ }
+ return *g.ID
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (g *GistFork) GetUpdatedAt() Timestamp {
+ if g == nil || g.UpdatedAt == nil {
+ return Timestamp{}
+ }
+ return *g.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (g *GistFork) GetURL() string {
+ if g == nil || g.URL == nil {
+ return ""
+ }
+ return *g.URL
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (g *Gitignore) GetName() string {
+ if g == nil || g.Name == nil {
+ return ""
+ }
+ return *g.Name
+}
+
+// GetSource returns the Source field if it's non-nil, zero value otherwise.
+func (g *Gitignore) GetSource() string {
+ if g == nil || g.Source == nil {
+ return ""
+ }
+ return *g.Source
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (g *GitObject) GetSHA() string {
+ if g == nil || g.SHA == nil {
+ return ""
+ }
+ return *g.SHA
+}
+
+// GetType returns the Type field if it's non-nil, zero value otherwise.
+func (g *GitObject) GetType() string {
+ if g == nil || g.Type == nil {
+ return ""
+ }
+ return *g.Type
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (g *GitObject) GetURL() string {
+ if g == nil || g.URL == nil {
+ return ""
+ }
+ return *g.URL
+}
+
+// GetEmail returns the Email field if it's non-nil, zero value otherwise.
+func (g *GPGEmail) GetEmail() string {
+ if g == nil || g.Email == nil {
+ return ""
+ }
+ return *g.Email
+}
+
+// GetVerified returns the Verified field if it's non-nil, zero value otherwise.
+func (g *GPGEmail) GetVerified() bool {
+ if g == nil || g.Verified == nil {
+ return false
+ }
+ return *g.Verified
+}
+
+// GetCanCertify returns the CanCertify field if it's non-nil, zero value otherwise.
+func (g *GPGKey) GetCanCertify() bool {
+ if g == nil || g.CanCertify == nil {
+ return false
+ }
+ return *g.CanCertify
+}
+
+// GetCanEncryptComms returns the CanEncryptComms field if it's non-nil, zero value otherwise.
+func (g *GPGKey) GetCanEncryptComms() bool {
+ if g == nil || g.CanEncryptComms == nil {
+ return false
+ }
+ return *g.CanEncryptComms
+}
+
+// GetCanEncryptStorage returns the CanEncryptStorage field if it's non-nil, zero value otherwise.
+func (g *GPGKey) GetCanEncryptStorage() bool {
+ if g == nil || g.CanEncryptStorage == nil {
+ return false
+ }
+ return *g.CanEncryptStorage
+}
+
+// GetCanSign returns the CanSign field if it's non-nil, zero value otherwise.
+func (g *GPGKey) GetCanSign() bool {
+ if g == nil || g.CanSign == nil {
+ return false
+ }
+ return *g.CanSign
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (g *GPGKey) GetCreatedAt() time.Time {
+ if g == nil || g.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *g.CreatedAt
+}
+
+// GetExpiresAt returns the ExpiresAt field if it's non-nil, zero value otherwise.
+func (g *GPGKey) GetExpiresAt() time.Time {
+ if g == nil || g.ExpiresAt == nil {
+ return time.Time{}
+ }
+ return *g.ExpiresAt
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (g *GPGKey) GetID() int {
+ if g == nil || g.ID == nil {
+ return 0
+ }
+ return *g.ID
+}
+
+// GetKeyID returns the KeyID field if it's non-nil, zero value otherwise.
+func (g *GPGKey) GetKeyID() string {
+ if g == nil || g.KeyID == nil {
+ return ""
+ }
+ return *g.KeyID
+}
+
+// GetPrimaryKeyID returns the PrimaryKeyID field if it's non-nil, zero value otherwise.
+func (g *GPGKey) GetPrimaryKeyID() int {
+ if g == nil || g.PrimaryKeyID == nil {
+ return 0
+ }
+ return *g.PrimaryKeyID
+}
+
+// GetPublicKey returns the PublicKey field if it's non-nil, zero value otherwise.
+func (g *GPGKey) GetPublicKey() string {
+ if g == nil || g.PublicKey == nil {
+ return ""
+ }
+ return *g.PublicKey
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (g *Grant) GetCreatedAt() Timestamp {
+ if g == nil || g.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *g.CreatedAt
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (g *Grant) GetID() int {
+ if g == nil || g.ID == nil {
+ return 0
+ }
+ return *g.ID
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (g *Grant) GetUpdatedAt() Timestamp {
+ if g == nil || g.UpdatedAt == nil {
+ return Timestamp{}
+ }
+ return *g.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (g *Grant) GetURL() string {
+ if g == nil || g.URL == nil {
+ return ""
+ }
+ return *g.URL
+}
+
+// GetActive returns the Active field if it's non-nil, zero value otherwise.
+func (h *Hook) GetActive() bool {
+ if h == nil || h.Active == nil {
+ return false
+ }
+ return *h.Active
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (h *Hook) GetCreatedAt() time.Time {
+ if h == nil || h.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *h.CreatedAt
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (h *Hook) GetID() int {
+ if h == nil || h.ID == nil {
+ return 0
+ }
+ return *h.ID
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (h *Hook) GetName() string {
+ if h == nil || h.Name == nil {
+ return ""
+ }
+ return *h.Name
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (h *Hook) GetUpdatedAt() time.Time {
+ if h == nil || h.UpdatedAt == nil {
+ return time.Time{}
+ }
+ return *h.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (h *Hook) GetURL() string {
+ if h == nil || h.URL == nil {
+ return ""
+ }
+ return *h.URL
+}
+
+// GetAuthorsCount returns the AuthorsCount field if it's non-nil, zero value otherwise.
+func (i *Import) GetAuthorsCount() int {
+ if i == nil || i.AuthorsCount == nil {
+ return 0
+ }
+ return *i.AuthorsCount
+}
+
+// GetAuthorsURL returns the AuthorsURL field if it's non-nil, zero value otherwise.
+func (i *Import) GetAuthorsURL() string {
+ if i == nil || i.AuthorsURL == nil {
+ return ""
+ }
+ return *i.AuthorsURL
+}
+
+// GetCommitCount returns the CommitCount field if it's non-nil, zero value otherwise.
+func (i *Import) GetCommitCount() int {
+ if i == nil || i.CommitCount == nil {
+ return 0
+ }
+ return *i.CommitCount
+}
+
+// GetFailedStep returns the FailedStep field if it's non-nil, zero value otherwise.
+func (i *Import) GetFailedStep() string {
+ if i == nil || i.FailedStep == nil {
+ return ""
+ }
+ return *i.FailedStep
+}
+
+// GetHasLargeFiles returns the HasLargeFiles field if it's non-nil, zero value otherwise.
+func (i *Import) GetHasLargeFiles() bool {
+ if i == nil || i.HasLargeFiles == nil {
+ return false
+ }
+ return *i.HasLargeFiles
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (i *Import) GetHTMLURL() string {
+ if i == nil || i.HTMLURL == nil {
+ return ""
+ }
+ return *i.HTMLURL
+}
+
+// GetHumanName returns the HumanName field if it's non-nil, zero value otherwise.
+func (i *Import) GetHumanName() string {
+ if i == nil || i.HumanName == nil {
+ return ""
+ }
+ return *i.HumanName
+}
+
+// GetLargeFilesCount returns the LargeFilesCount field if it's non-nil, zero value otherwise.
+func (i *Import) GetLargeFilesCount() int {
+ if i == nil || i.LargeFilesCount == nil {
+ return 0
+ }
+ return *i.LargeFilesCount
+}
+
+// GetLargeFilesSize returns the LargeFilesSize field if it's non-nil, zero value otherwise.
+func (i *Import) GetLargeFilesSize() int {
+ if i == nil || i.LargeFilesSize == nil {
+ return 0
+ }
+ return *i.LargeFilesSize
+}
+
+// GetMessage returns the Message field if it's non-nil, zero value otherwise.
+func (i *Import) GetMessage() string {
+ if i == nil || i.Message == nil {
+ return ""
+ }
+ return *i.Message
+}
+
+// GetPercent returns the Percent field if it's non-nil, zero value otherwise.
+func (i *Import) GetPercent() int {
+ if i == nil || i.Percent == nil {
+ return 0
+ }
+ return *i.Percent
+}
+
+// GetPushPercent returns the PushPercent field if it's non-nil, zero value otherwise.
+func (i *Import) GetPushPercent() int {
+ if i == nil || i.PushPercent == nil {
+ return 0
+ }
+ return *i.PushPercent
+}
+
+// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise.
+func (i *Import) GetRepositoryURL() string {
+ if i == nil || i.RepositoryURL == nil {
+ return ""
+ }
+ return *i.RepositoryURL
+}
+
+// GetStatus returns the Status field if it's non-nil, zero value otherwise.
+func (i *Import) GetStatus() string {
+ if i == nil || i.Status == nil {
+ return ""
+ }
+ return *i.Status
+}
+
+// GetStatusText returns the StatusText field if it's non-nil, zero value otherwise.
+func (i *Import) GetStatusText() string {
+ if i == nil || i.StatusText == nil {
+ return ""
+ }
+ return *i.StatusText
+}
+
+// GetTFVCProject returns the TFVCProject field if it's non-nil, zero value otherwise.
+func (i *Import) GetTFVCProject() string {
+ if i == nil || i.TFVCProject == nil {
+ return ""
+ }
+ return *i.TFVCProject
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (i *Import) GetURL() string {
+ if i == nil || i.URL == nil {
+ return ""
+ }
+ return *i.URL
+}
+
+// GetUseLFS returns the UseLFS field if it's non-nil, zero value otherwise.
+func (i *Import) GetUseLFS() string {
+ if i == nil || i.UseLFS == nil {
+ return ""
+ }
+ return *i.UseLFS
+}
+
+// GetVCS returns the VCS field if it's non-nil, zero value otherwise.
+func (i *Import) GetVCS() string {
+ if i == nil || i.VCS == nil {
+ return ""
+ }
+ return *i.VCS
+}
+
+// GetVCSPassword returns the VCSPassword field if it's non-nil, zero value otherwise.
+func (i *Import) GetVCSPassword() string {
+ if i == nil || i.VCSPassword == nil {
+ return ""
+ }
+ return *i.VCSPassword
+}
+
+// GetVCSURL returns the VCSURL field if it's non-nil, zero value otherwise.
+func (i *Import) GetVCSURL() string {
+ if i == nil || i.VCSURL == nil {
+ return ""
+ }
+ return *i.VCSURL
+}
+
+// GetVCSUsername returns the VCSUsername field if it's non-nil, zero value otherwise.
+func (i *Import) GetVCSUsername() string {
+ if i == nil || i.VCSUsername == nil {
+ return ""
+ }
+ return *i.VCSUsername
+}
+
+// GetAccessTokensURL returns the AccessTokensURL field if it's non-nil, zero value otherwise.
+func (i *Installation) GetAccessTokensURL() string {
+ if i == nil || i.AccessTokensURL == nil {
+ return ""
+ }
+ return *i.AccessTokensURL
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (i *Installation) GetHTMLURL() string {
+ if i == nil || i.HTMLURL == nil {
+ return ""
+ }
+ return *i.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (i *Installation) GetID() int {
+ if i == nil || i.ID == nil {
+ return 0
+ }
+ return *i.ID
+}
+
+// GetRepositoriesURL returns the RepositoriesURL field if it's non-nil, zero value otherwise.
+func (i *Installation) GetRepositoriesURL() string {
+ if i == nil || i.RepositoriesURL == nil {
+ return ""
+ }
+ return *i.RepositoriesURL
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (i *IntegrationInstallationEvent) GetAction() string {
+ if i == nil || i.Action == nil {
+ return ""
+ }
+ return *i.Action
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (i *IntegrationInstallationRepositoriesEvent) GetAction() string {
+ if i == nil || i.Action == nil {
+ return ""
+ }
+ return *i.Action
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (i *Invitation) GetCreatedAt() time.Time {
+ if i == nil || i.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *i.CreatedAt
+}
+
+// GetEmail returns the Email field if it's non-nil, zero value otherwise.
+func (i *Invitation) GetEmail() string {
+ if i == nil || i.Email == nil {
+ return ""
+ }
+ return *i.Email
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (i *Invitation) GetID() int {
+ if i == nil || i.ID == nil {
+ return 0
+ }
+ return *i.ID
+}
+
+// GetLogin returns the Login field if it's non-nil, zero value otherwise.
+func (i *Invitation) GetLogin() string {
+ if i == nil || i.Login == nil {
+ return ""
+ }
+ return *i.Login
+}
+
+// GetRole returns the Role field if it's non-nil, zero value otherwise.
+func (i *Invitation) GetRole() string {
+ if i == nil || i.Role == nil {
+ return ""
+ }
+ return *i.Role
+}
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (i *Issue) GetBody() string {
+ if i == nil || i.Body == nil {
+ return ""
+ }
+ return *i.Body
+}
+
+// GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise.
+func (i *Issue) GetClosedAt() time.Time {
+ if i == nil || i.ClosedAt == nil {
+ return time.Time{}
+ }
+ return *i.ClosedAt
+}
+
+// GetComments returns the Comments field if it's non-nil, zero value otherwise.
+func (i *Issue) GetComments() int {
+ if i == nil || i.Comments == nil {
+ return 0
+ }
+ return *i.Comments
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (i *Issue) GetCreatedAt() time.Time {
+ if i == nil || i.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *i.CreatedAt
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (i *Issue) GetHTMLURL() string {
+ if i == nil || i.HTMLURL == nil {
+ return ""
+ }
+ return *i.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (i *Issue) GetID() int {
+ if i == nil || i.ID == nil {
+ return 0
+ }
+ return *i.ID
+}
+
+// GetLocked returns the Locked field if it's non-nil, zero value otherwise.
+func (i *Issue) GetLocked() bool {
+ if i == nil || i.Locked == nil {
+ return false
+ }
+ return *i.Locked
+}
+
+// GetNumber returns the Number field if it's non-nil, zero value otherwise.
+func (i *Issue) GetNumber() int {
+ if i == nil || i.Number == nil {
+ return 0
+ }
+ return *i.Number
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (i *Issue) GetState() string {
+ if i == nil || i.State == nil {
+ return ""
+ }
+ return *i.State
+}
+
+// GetTitle returns the Title field if it's non-nil, zero value otherwise.
+func (i *Issue) GetTitle() string {
+ if i == nil || i.Title == nil {
+ return ""
+ }
+ return *i.Title
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (i *Issue) GetUpdatedAt() time.Time {
+ if i == nil || i.UpdatedAt == nil {
+ return time.Time{}
+ }
+ return *i.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (i *Issue) GetURL() string {
+ if i == nil || i.URL == nil {
+ return ""
+ }
+ return *i.URL
+}
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (i *IssueComment) GetBody() string {
+ if i == nil || i.Body == nil {
+ return ""
+ }
+ return *i.Body
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (i *IssueComment) GetCreatedAt() time.Time {
+ if i == nil || i.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *i.CreatedAt
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (i *IssueComment) GetHTMLURL() string {
+ if i == nil || i.HTMLURL == nil {
+ return ""
+ }
+ return *i.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (i *IssueComment) GetID() int {
+ if i == nil || i.ID == nil {
+ return 0
+ }
+ return *i.ID
+}
+
+// GetIssueURL returns the IssueURL field if it's non-nil, zero value otherwise.
+func (i *IssueComment) GetIssueURL() string {
+ if i == nil || i.IssueURL == nil {
+ return ""
+ }
+ return *i.IssueURL
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (i *IssueComment) GetUpdatedAt() time.Time {
+ if i == nil || i.UpdatedAt == nil {
+ return time.Time{}
+ }
+ return *i.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (i *IssueComment) GetURL() string {
+ if i == nil || i.URL == nil {
+ return ""
+ }
+ return *i.URL
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (i *IssueCommentEvent) GetAction() string {
+ if i == nil || i.Action == nil {
+ return ""
+ }
+ return *i.Action
+}
+
+// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise.
+func (i *IssueEvent) GetCommitID() string {
+ if i == nil || i.CommitID == nil {
+ return ""
+ }
+ return *i.CommitID
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (i *IssueEvent) GetCreatedAt() time.Time {
+ if i == nil || i.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *i.CreatedAt
+}
+
+// GetEvent returns the Event field if it's non-nil, zero value otherwise.
+func (i *IssueEvent) GetEvent() string {
+ if i == nil || i.Event == nil {
+ return ""
+ }
+ return *i.Event
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (i *IssueEvent) GetID() int {
+ if i == nil || i.ID == nil {
+ return 0
+ }
+ return *i.ID
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (i *IssueEvent) GetURL() string {
+ if i == nil || i.URL == nil {
+ return ""
+ }
+ return *i.URL
+}
+
+// GetAssignee returns the Assignee field if it's non-nil, zero value otherwise.
+func (i *IssueRequest) GetAssignee() string {
+ if i == nil || i.Assignee == nil {
+ return ""
+ }
+ return *i.Assignee
+}
+
+// GetAssignees returns the Assignees field if it's non-nil, zero value otherwise.
+func (i *IssueRequest) GetAssignees() []string {
+ if i == nil || i.Assignees == nil {
+ return nil
+ }
+ return *i.Assignees
+}
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (i *IssueRequest) GetBody() string {
+ if i == nil || i.Body == nil {
+ return ""
+ }
+ return *i.Body
+}
+
+// GetLabels returns the Labels field if it's non-nil, zero value otherwise.
+func (i *IssueRequest) GetLabels() []string {
+ if i == nil || i.Labels == nil {
+ return nil
+ }
+ return *i.Labels
+}
+
+// GetMilestone returns the Milestone field if it's non-nil, zero value otherwise.
+func (i *IssueRequest) GetMilestone() int {
+ if i == nil || i.Milestone == nil {
+ return 0
+ }
+ return *i.Milestone
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (i *IssueRequest) GetState() string {
+ if i == nil || i.State == nil {
+ return ""
+ }
+ return *i.State
+}
+
+// GetTitle returns the Title field if it's non-nil, zero value otherwise.
+func (i *IssueRequest) GetTitle() string {
+ if i == nil || i.Title == nil {
+ return ""
+ }
+ return *i.Title
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (i *IssuesEvent) GetAction() string {
+ if i == nil || i.Action == nil {
+ return ""
+ }
+ return *i.Action
+}
+
+// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise.
+func (i *IssuesSearchResult) GetIncompleteResults() bool {
+ if i == nil || i.IncompleteResults == nil {
+ return false
+ }
+ return *i.IncompleteResults
+}
+
+// GetTotal returns the Total field if it's non-nil, zero value otherwise.
+func (i *IssuesSearchResult) GetTotal() int {
+ if i == nil || i.Total == nil {
+ return 0
+ }
+ return *i.Total
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (k *Key) GetID() int {
+ if k == nil || k.ID == nil {
+ return 0
+ }
+ return *k.ID
+}
+
+// GetKey returns the Key field if it's non-nil, zero value otherwise.
+func (k *Key) GetKey() string {
+ if k == nil || k.Key == nil {
+ return ""
+ }
+ return *k.Key
+}
+
+// GetReadOnly returns the ReadOnly field if it's non-nil, zero value otherwise.
+func (k *Key) GetReadOnly() bool {
+ if k == nil || k.ReadOnly == nil {
+ return false
+ }
+ return *k.ReadOnly
+}
+
+// GetTitle returns the Title field if it's non-nil, zero value otherwise.
+func (k *Key) GetTitle() string {
+ if k == nil || k.Title == nil {
+ return ""
+ }
+ return *k.Title
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (k *Key) GetURL() string {
+ if k == nil || k.URL == nil {
+ return ""
+ }
+ return *k.URL
+}
+
+// GetColor returns the Color field if it's non-nil, zero value otherwise.
+func (l *Label) GetColor() string {
+ if l == nil || l.Color == nil {
+ return ""
+ }
+ return *l.Color
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (l *Label) GetID() int {
+ if l == nil || l.ID == nil {
+ return 0
+ }
+ return *l.ID
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (l *Label) GetName() string {
+ if l == nil || l.Name == nil {
+ return ""
+ }
+ return *l.Name
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (l *Label) GetURL() string {
+ if l == nil || l.URL == nil {
+ return ""
+ }
+ return *l.URL
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (l *LabelEvent) GetAction() string {
+ if l == nil || l.Action == nil {
+ return ""
+ }
+ return *l.Action
+}
+
+// GetOID returns the OID field if it's non-nil, zero value otherwise.
+func (l *LargeFile) GetOID() string {
+ if l == nil || l.OID == nil {
+ return ""
+ }
+ return *l.OID
+}
+
+// GetPath returns the Path field if it's non-nil, zero value otherwise.
+func (l *LargeFile) GetPath() string {
+ if l == nil || l.Path == nil {
+ return ""
+ }
+ return *l.Path
+}
+
+// GetRefName returns the RefName field if it's non-nil, zero value otherwise.
+func (l *LargeFile) GetRefName() string {
+ if l == nil || l.RefName == nil {
+ return ""
+ }
+ return *l.RefName
+}
+
+// GetSize returns the Size field if it's non-nil, zero value otherwise.
+func (l *LargeFile) GetSize() int {
+ if l == nil || l.Size == nil {
+ return 0
+ }
+ return *l.Size
+}
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (l *License) GetBody() string {
+ if l == nil || l.Body == nil {
+ return ""
+ }
+ return *l.Body
+}
+
+// GetConditions returns the Conditions field if it's non-nil, zero value otherwise.
+func (l *License) GetConditions() []string {
+ if l == nil || l.Conditions == nil {
+ return nil
+ }
+ return *l.Conditions
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (l *License) GetDescription() string {
+ if l == nil || l.Description == nil {
+ return ""
+ }
+ return *l.Description
+}
+
+// GetFeatured returns the Featured field if it's non-nil, zero value otherwise.
+func (l *License) GetFeatured() bool {
+ if l == nil || l.Featured == nil {
+ return false
+ }
+ return *l.Featured
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (l *License) GetHTMLURL() string {
+ if l == nil || l.HTMLURL == nil {
+ return ""
+ }
+ return *l.HTMLURL
+}
+
+// GetImplementation returns the Implementation field if it's non-nil, zero value otherwise.
+func (l *License) GetImplementation() string {
+ if l == nil || l.Implementation == nil {
+ return ""
+ }
+ return *l.Implementation
+}
+
+// GetKey returns the Key field if it's non-nil, zero value otherwise.
+func (l *License) GetKey() string {
+ if l == nil || l.Key == nil {
+ return ""
+ }
+ return *l.Key
+}
+
+// GetLimitations returns the Limitations field if it's non-nil, zero value otherwise.
+func (l *License) GetLimitations() []string {
+ if l == nil || l.Limitations == nil {
+ return nil
+ }
+ return *l.Limitations
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (l *License) GetName() string {
+ if l == nil || l.Name == nil {
+ return ""
+ }
+ return *l.Name
+}
+
+// GetPermissions returns the Permissions field if it's non-nil, zero value otherwise.
+func (l *License) GetPermissions() []string {
+ if l == nil || l.Permissions == nil {
+ return nil
+ }
+ return *l.Permissions
+}
+
+// GetSPDXID returns the SPDXID field if it's non-nil, zero value otherwise.
+func (l *License) GetSPDXID() string {
+ if l == nil || l.SPDXID == nil {
+ return ""
+ }
+ return *l.SPDXID
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (l *License) GetURL() string {
+ if l == nil || l.URL == nil {
+ return ""
+ }
+ return *l.URL
+}
+
+// GetContext returns the Context field if it's non-nil, zero value otherwise.
+func (m *markdownRequest) GetContext() string {
+ if m == nil || m.Context == nil {
+ return ""
+ }
+ return *m.Context
+}
+
+// GetMode returns the Mode field if it's non-nil, zero value otherwise.
+func (m *markdownRequest) GetMode() string {
+ if m == nil || m.Mode == nil {
+ return ""
+ }
+ return *m.Mode
+}
+
+// GetText returns the Text field if it's non-nil, zero value otherwise.
+func (m *markdownRequest) GetText() string {
+ if m == nil || m.Text == nil {
+ return ""
+ }
+ return *m.Text
+}
+
+// GetText returns the Text field if it's non-nil, zero value otherwise.
+func (m *Match) GetText() string {
+ if m == nil || m.Text == nil {
+ return ""
+ }
+ return *m.Text
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (m *MemberEvent) GetAction() string {
+ if m == nil || m.Action == nil {
+ return ""
+ }
+ return *m.Action
+}
+
+// GetOrganizationURL returns the OrganizationURL field if it's non-nil, zero value otherwise.
+func (m *Membership) GetOrganizationURL() string {
+ if m == nil || m.OrganizationURL == nil {
+ return ""
+ }
+ return *m.OrganizationURL
+}
+
+// GetRole returns the Role field if it's non-nil, zero value otherwise.
+func (m *Membership) GetRole() string {
+ if m == nil || m.Role == nil {
+ return ""
+ }
+ return *m.Role
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (m *Membership) GetState() string {
+ if m == nil || m.State == nil {
+ return ""
+ }
+ return *m.State
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (m *Membership) GetURL() string {
+ if m == nil || m.URL == nil {
+ return ""
+ }
+ return *m.URL
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (m *MembershipEvent) GetAction() string {
+ if m == nil || m.Action == nil {
+ return ""
+ }
+ return *m.Action
+}
+
+// GetScope returns the Scope field if it's non-nil, zero value otherwise.
+func (m *MembershipEvent) GetScope() string {
+ if m == nil || m.Scope == nil {
+ return ""
+ }
+ return *m.Scope
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (m *Migration) GetCreatedAt() string {
+ if m == nil || m.CreatedAt == nil {
+ return ""
+ }
+ return *m.CreatedAt
+}
+
+// GetExcludeAttachments returns the ExcludeAttachments field if it's non-nil, zero value otherwise.
+func (m *Migration) GetExcludeAttachments() bool {
+ if m == nil || m.ExcludeAttachments == nil {
+ return false
+ }
+ return *m.ExcludeAttachments
+}
+
+// GetGUID returns the GUID field if it's non-nil, zero value otherwise.
+func (m *Migration) GetGUID() string {
+ if m == nil || m.GUID == nil {
+ return ""
+ }
+ return *m.GUID
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (m *Migration) GetID() int {
+ if m == nil || m.ID == nil {
+ return 0
+ }
+ return *m.ID
+}
+
+// GetLockRepositories returns the LockRepositories field if it's non-nil, zero value otherwise.
+func (m *Migration) GetLockRepositories() bool {
+ if m == nil || m.LockRepositories == nil {
+ return false
+ }
+ return *m.LockRepositories
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (m *Migration) GetState() string {
+ if m == nil || m.State == nil {
+ return ""
+ }
+ return *m.State
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (m *Migration) GetUpdatedAt() string {
+ if m == nil || m.UpdatedAt == nil {
+ return ""
+ }
+ return *m.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (m *Migration) GetURL() string {
+ if m == nil || m.URL == nil {
+ return ""
+ }
+ return *m.URL
+}
+
+// GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise.
+func (m *Milestone) GetClosedAt() time.Time {
+ if m == nil || m.ClosedAt == nil {
+ return time.Time{}
+ }
+ return *m.ClosedAt
+}
+
+// GetClosedIssues returns the ClosedIssues field if it's non-nil, zero value otherwise.
+func (m *Milestone) GetClosedIssues() int {
+ if m == nil || m.ClosedIssues == nil {
+ return 0
+ }
+ return *m.ClosedIssues
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (m *Milestone) GetCreatedAt() time.Time {
+ if m == nil || m.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *m.CreatedAt
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (m *Milestone) GetDescription() string {
+ if m == nil || m.Description == nil {
+ return ""
+ }
+ return *m.Description
+}
+
+// GetDueOn returns the DueOn field if it's non-nil, zero value otherwise.
+func (m *Milestone) GetDueOn() time.Time {
+ if m == nil || m.DueOn == nil {
+ return time.Time{}
+ }
+ return *m.DueOn
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (m *Milestone) GetHTMLURL() string {
+ if m == nil || m.HTMLURL == nil {
+ return ""
+ }
+ return *m.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (m *Milestone) GetID() int {
+ if m == nil || m.ID == nil {
+ return 0
+ }
+ return *m.ID
+}
+
+// GetLabelsURL returns the LabelsURL field if it's non-nil, zero value otherwise.
+func (m *Milestone) GetLabelsURL() string {
+ if m == nil || m.LabelsURL == nil {
+ return ""
+ }
+ return *m.LabelsURL
+}
+
+// GetNumber returns the Number field if it's non-nil, zero value otherwise.
+func (m *Milestone) GetNumber() int {
+ if m == nil || m.Number == nil {
+ return 0
+ }
+ return *m.Number
+}
+
+// GetOpenIssues returns the OpenIssues field if it's non-nil, zero value otherwise.
+func (m *Milestone) GetOpenIssues() int {
+ if m == nil || m.OpenIssues == nil {
+ return 0
+ }
+ return *m.OpenIssues
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (m *Milestone) GetState() string {
+ if m == nil || m.State == nil {
+ return ""
+ }
+ return *m.State
+}
+
+// GetTitle returns the Title field if it's non-nil, zero value otherwise.
+func (m *Milestone) GetTitle() string {
+ if m == nil || m.Title == nil {
+ return ""
+ }
+ return *m.Title
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (m *Milestone) GetUpdatedAt() time.Time {
+ if m == nil || m.UpdatedAt == nil {
+ return time.Time{}
+ }
+ return *m.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (m *Milestone) GetURL() string {
+ if m == nil || m.URL == nil {
+ return ""
+ }
+ return *m.URL
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (m *MilestoneEvent) GetAction() string {
+ if m == nil || m.Action == nil {
+ return ""
+ }
+ return *m.Action
+}
+
+// GetBase returns the Base field if it's non-nil, zero value otherwise.
+func (n *NewPullRequest) GetBase() string {
+ if n == nil || n.Base == nil {
+ return ""
+ }
+ return *n.Base
+}
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (n *NewPullRequest) GetBody() string {
+ if n == nil || n.Body == nil {
+ return ""
+ }
+ return *n.Body
+}
+
+// GetHead returns the Head field if it's non-nil, zero value otherwise.
+func (n *NewPullRequest) GetHead() string {
+ if n == nil || n.Head == nil {
+ return ""
+ }
+ return *n.Head
+}
+
+// GetIssue returns the Issue field if it's non-nil, zero value otherwise.
+func (n *NewPullRequest) GetIssue() int {
+ if n == nil || n.Issue == nil {
+ return 0
+ }
+ return *n.Issue
+}
+
+// GetMaintainerCanModify returns the MaintainerCanModify field if it's non-nil, zero value otherwise.
+func (n *NewPullRequest) GetMaintainerCanModify() bool {
+ if n == nil || n.MaintainerCanModify == nil {
+ return false
+ }
+ return *n.MaintainerCanModify
+}
+
+// GetTitle returns the Title field if it's non-nil, zero value otherwise.
+func (n *NewPullRequest) GetTitle() string {
+ if n == nil || n.Title == nil {
+ return ""
+ }
+ return *n.Title
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (n *Notification) GetID() string {
+ if n == nil || n.ID == nil {
+ return ""
+ }
+ return *n.ID
+}
+
+// GetLastReadAt returns the LastReadAt field if it's non-nil, zero value otherwise.
+func (n *Notification) GetLastReadAt() time.Time {
+ if n == nil || n.LastReadAt == nil {
+ return time.Time{}
+ }
+ return *n.LastReadAt
+}
+
+// GetReason returns the Reason field if it's non-nil, zero value otherwise.
+func (n *Notification) GetReason() string {
+ if n == nil || n.Reason == nil {
+ return ""
+ }
+ return *n.Reason
+}
+
+// GetUnread returns the Unread field if it's non-nil, zero value otherwise.
+func (n *Notification) GetUnread() bool {
+ if n == nil || n.Unread == nil {
+ return false
+ }
+ return *n.Unread
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (n *Notification) GetUpdatedAt() time.Time {
+ if n == nil || n.UpdatedAt == nil {
+ return time.Time{}
+ }
+ return *n.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (n *Notification) GetURL() string {
+ if n == nil || n.URL == nil {
+ return ""
+ }
+ return *n.URL
+}
+
+// GetLatestCommentURL returns the LatestCommentURL field if it's non-nil, zero value otherwise.
+func (n *NotificationSubject) GetLatestCommentURL() string {
+ if n == nil || n.LatestCommentURL == nil {
+ return ""
+ }
+ return *n.LatestCommentURL
+}
+
+// GetTitle returns the Title field if it's non-nil, zero value otherwise.
+func (n *NotificationSubject) GetTitle() string {
+ if n == nil || n.Title == nil {
+ return ""
+ }
+ return *n.Title
+}
+
+// GetType returns the Type field if it's non-nil, zero value otherwise.
+func (n *NotificationSubject) GetType() string {
+ if n == nil || n.Type == nil {
+ return ""
+ }
+ return *n.Type
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (n *NotificationSubject) GetURL() string {
+ if n == nil || n.URL == nil {
+ return ""
+ }
+ return *n.URL
+}
+
+// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise.
+func (o *Organization) GetAvatarURL() string {
+ if o == nil || o.AvatarURL == nil {
+ return ""
+ }
+ return *o.AvatarURL
+}
+
+// GetBillingEmail returns the BillingEmail field if it's non-nil, zero value otherwise.
+func (o *Organization) GetBillingEmail() string {
+ if o == nil || o.BillingEmail == nil {
+ return ""
+ }
+ return *o.BillingEmail
+}
+
+// GetBlog returns the Blog field if it's non-nil, zero value otherwise.
+func (o *Organization) GetBlog() string {
+ if o == nil || o.Blog == nil {
+ return ""
+ }
+ return *o.Blog
+}
+
+// GetCollaborators returns the Collaborators field if it's non-nil, zero value otherwise.
+func (o *Organization) GetCollaborators() int {
+ if o == nil || o.Collaborators == nil {
+ return 0
+ }
+ return *o.Collaborators
+}
+
+// GetCompany returns the Company field if it's non-nil, zero value otherwise.
+func (o *Organization) GetCompany() string {
+ if o == nil || o.Company == nil {
+ return ""
+ }
+ return *o.Company
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (o *Organization) GetCreatedAt() time.Time {
+ if o == nil || o.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *o.CreatedAt
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (o *Organization) GetDescription() string {
+ if o == nil || o.Description == nil {
+ return ""
+ }
+ return *o.Description
+}
+
+// GetDiskUsage returns the DiskUsage field if it's non-nil, zero value otherwise.
+func (o *Organization) GetDiskUsage() int {
+ if o == nil || o.DiskUsage == nil {
+ return 0
+ }
+ return *o.DiskUsage
+}
+
+// GetEmail returns the Email field if it's non-nil, zero value otherwise.
+func (o *Organization) GetEmail() string {
+ if o == nil || o.Email == nil {
+ return ""
+ }
+ return *o.Email
+}
+
+// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise.
+func (o *Organization) GetEventsURL() string {
+ if o == nil || o.EventsURL == nil {
+ return ""
+ }
+ return *o.EventsURL
+}
+
+// GetFollowers returns the Followers field if it's non-nil, zero value otherwise.
+func (o *Organization) GetFollowers() int {
+ if o == nil || o.Followers == nil {
+ return 0
+ }
+ return *o.Followers
+}
+
+// GetFollowing returns the Following field if it's non-nil, zero value otherwise.
+func (o *Organization) GetFollowing() int {
+ if o == nil || o.Following == nil {
+ return 0
+ }
+ return *o.Following
+}
+
+// GetHooksURL returns the HooksURL field if it's non-nil, zero value otherwise.
+func (o *Organization) GetHooksURL() string {
+ if o == nil || o.HooksURL == nil {
+ return ""
+ }
+ return *o.HooksURL
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (o *Organization) GetHTMLURL() string {
+ if o == nil || o.HTMLURL == nil {
+ return ""
+ }
+ return *o.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (o *Organization) GetID() int {
+ if o == nil || o.ID == nil {
+ return 0
+ }
+ return *o.ID
+}
+
+// GetIssuesURL returns the IssuesURL field if it's non-nil, zero value otherwise.
+func (o *Organization) GetIssuesURL() string {
+ if o == nil || o.IssuesURL == nil {
+ return ""
+ }
+ return *o.IssuesURL
+}
+
+// GetLocation returns the Location field if it's non-nil, zero value otherwise.
+func (o *Organization) GetLocation() string {
+ if o == nil || o.Location == nil {
+ return ""
+ }
+ return *o.Location
+}
+
+// GetLogin returns the Login field if it's non-nil, zero value otherwise.
+func (o *Organization) GetLogin() string {
+ if o == nil || o.Login == nil {
+ return ""
+ }
+ return *o.Login
+}
+
+// GetMembersURL returns the MembersURL field if it's non-nil, zero value otherwise.
+func (o *Organization) GetMembersURL() string {
+ if o == nil || o.MembersURL == nil {
+ return ""
+ }
+ return *o.MembersURL
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (o *Organization) GetName() string {
+ if o == nil || o.Name == nil {
+ return ""
+ }
+ return *o.Name
+}
+
+// GetOwnedPrivateRepos returns the OwnedPrivateRepos field if it's non-nil, zero value otherwise.
+func (o *Organization) GetOwnedPrivateRepos() int {
+ if o == nil || o.OwnedPrivateRepos == nil {
+ return 0
+ }
+ return *o.OwnedPrivateRepos
+}
+
+// GetPrivateGists returns the PrivateGists field if it's non-nil, zero value otherwise.
+func (o *Organization) GetPrivateGists() int {
+ if o == nil || o.PrivateGists == nil {
+ return 0
+ }
+ return *o.PrivateGists
+}
+
+// GetPublicGists returns the PublicGists field if it's non-nil, zero value otherwise.
+func (o *Organization) GetPublicGists() int {
+ if o == nil || o.PublicGists == nil {
+ return 0
+ }
+ return *o.PublicGists
+}
+
+// GetPublicMembersURL returns the PublicMembersURL field if it's non-nil, zero value otherwise.
+func (o *Organization) GetPublicMembersURL() string {
+ if o == nil || o.PublicMembersURL == nil {
+ return ""
+ }
+ return *o.PublicMembersURL
+}
+
+// GetPublicRepos returns the PublicRepos field if it's non-nil, zero value otherwise.
+func (o *Organization) GetPublicRepos() int {
+ if o == nil || o.PublicRepos == nil {
+ return 0
+ }
+ return *o.PublicRepos
+}
+
+// GetReposURL returns the ReposURL field if it's non-nil, zero value otherwise.
+func (o *Organization) GetReposURL() string {
+ if o == nil || o.ReposURL == nil {
+ return ""
+ }
+ return *o.ReposURL
+}
+
+// GetTotalPrivateRepos returns the TotalPrivateRepos field if it's non-nil, zero value otherwise.
+func (o *Organization) GetTotalPrivateRepos() int {
+ if o == nil || o.TotalPrivateRepos == nil {
+ return 0
+ }
+ return *o.TotalPrivateRepos
+}
+
+// GetType returns the Type field if it's non-nil, zero value otherwise.
+func (o *Organization) GetType() string {
+ if o == nil || o.Type == nil {
+ return ""
+ }
+ return *o.Type
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (o *Organization) GetUpdatedAt() time.Time {
+ if o == nil || o.UpdatedAt == nil {
+ return time.Time{}
+ }
+ return *o.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (o *Organization) GetURL() string {
+ if o == nil || o.URL == nil {
+ return ""
+ }
+ return *o.URL
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (o *OrganizationEvent) GetAction() string {
+ if o == nil || o.Action == nil {
+ return ""
+ }
+ return *o.Action
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (p *Page) GetAction() string {
+ if p == nil || p.Action == nil {
+ return ""
+ }
+ return *p.Action
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (p *Page) GetHTMLURL() string {
+ if p == nil || p.HTMLURL == nil {
+ return ""
+ }
+ return *p.HTMLURL
+}
+
+// GetPageName returns the PageName field if it's non-nil, zero value otherwise.
+func (p *Page) GetPageName() string {
+ if p == nil || p.PageName == nil {
+ return ""
+ }
+ return *p.PageName
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (p *Page) GetSHA() string {
+ if p == nil || p.SHA == nil {
+ return ""
+ }
+ return *p.SHA
+}
+
+// GetSummary returns the Summary field if it's non-nil, zero value otherwise.
+func (p *Page) GetSummary() string {
+ if p == nil || p.Summary == nil {
+ return ""
+ }
+ return *p.Summary
+}
+
+// GetTitle returns the Title field if it's non-nil, zero value otherwise.
+func (p *Page) GetTitle() string {
+ if p == nil || p.Title == nil {
+ return ""
+ }
+ return *p.Title
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (p *PageBuildEvent) GetID() int {
+ if p == nil || p.ID == nil {
+ return 0
+ }
+ return *p.ID
+}
+
+// GetCNAME returns the CNAME field if it's non-nil, zero value otherwise.
+func (p *Pages) GetCNAME() string {
+ if p == nil || p.CNAME == nil {
+ return ""
+ }
+ return *p.CNAME
+}
+
+// GetCustom404 returns the Custom404 field if it's non-nil, zero value otherwise.
+func (p *Pages) GetCustom404() bool {
+ if p == nil || p.Custom404 == nil {
+ return false
+ }
+ return *p.Custom404
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (p *Pages) GetHTMLURL() string {
+ if p == nil || p.HTMLURL == nil {
+ return ""
+ }
+ return *p.HTMLURL
+}
+
+// GetStatus returns the Status field if it's non-nil, zero value otherwise.
+func (p *Pages) GetStatus() string {
+ if p == nil || p.Status == nil {
+ return ""
+ }
+ return *p.Status
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (p *Pages) GetURL() string {
+ if p == nil || p.URL == nil {
+ return ""
+ }
+ return *p.URL
+}
+
+// GetCommit returns the Commit field if it's non-nil, zero value otherwise.
+func (p *PagesBuild) GetCommit() string {
+ if p == nil || p.Commit == nil {
+ return ""
+ }
+ return *p.Commit
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (p *PagesBuild) GetCreatedAt() Timestamp {
+ if p == nil || p.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *p.CreatedAt
+}
+
+// GetDuration returns the Duration field if it's non-nil, zero value otherwise.
+func (p *PagesBuild) GetDuration() int {
+ if p == nil || p.Duration == nil {
+ return 0
+ }
+ return *p.Duration
+}
+
+// GetStatus returns the Status field if it's non-nil, zero value otherwise.
+func (p *PagesBuild) GetStatus() string {
+ if p == nil || p.Status == nil {
+ return ""
+ }
+ return *p.Status
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (p *PagesBuild) GetUpdatedAt() Timestamp {
+ if p == nil || p.UpdatedAt == nil {
+ return Timestamp{}
+ }
+ return *p.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (p *PagesBuild) GetURL() string {
+ if p == nil || p.URL == nil {
+ return ""
+ }
+ return *p.URL
+}
+
+// GetMessage returns the Message field if it's non-nil, zero value otherwise.
+func (p *PagesError) GetMessage() string {
+ if p == nil || p.Message == nil {
+ return ""
+ }
+ return *p.Message
+}
+
+// GetHookID returns the HookID field if it's non-nil, zero value otherwise.
+func (p *PingEvent) GetHookID() int {
+ if p == nil || p.HookID == nil {
+ return 0
+ }
+ return *p.HookID
+}
+
+// GetZen returns the Zen field if it's non-nil, zero value otherwise.
+func (p *PingEvent) GetZen() string {
+ if p == nil || p.Zen == nil {
+ return ""
+ }
+ return *p.Zen
+}
+
+// GetCollaborators returns the Collaborators field if it's non-nil, zero value otherwise.
+func (p *Plan) GetCollaborators() int {
+ if p == nil || p.Collaborators == nil {
+ return 0
+ }
+ return *p.Collaborators
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (p *Plan) GetName() string {
+ if p == nil || p.Name == nil {
+ return ""
+ }
+ return *p.Name
+}
+
+// GetPrivateRepos returns the PrivateRepos field if it's non-nil, zero value otherwise.
+func (p *Plan) GetPrivateRepos() int {
+ if p == nil || p.PrivateRepos == nil {
+ return 0
+ }
+ return *p.PrivateRepos
+}
+
+// GetSpace returns the Space field if it's non-nil, zero value otherwise.
+func (p *Plan) GetSpace() int {
+ if p == nil || p.Space == nil {
+ return 0
+ }
+ return *p.Space
+}
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (p *Project) GetBody() string {
+ if p == nil || p.Body == nil {
+ return ""
+ }
+ return *p.Body
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (p *Project) GetCreatedAt() Timestamp {
+ if p == nil || p.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *p.CreatedAt
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (p *Project) GetID() int {
+ if p == nil || p.ID == nil {
+ return 0
+ }
+ return *p.ID
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (p *Project) GetName() string {
+ if p == nil || p.Name == nil {
+ return ""
+ }
+ return *p.Name
+}
+
+// GetNumber returns the Number field if it's non-nil, zero value otherwise.
+func (p *Project) GetNumber() int {
+ if p == nil || p.Number == nil {
+ return 0
+ }
+ return *p.Number
+}
+
+// GetOwnerURL returns the OwnerURL field if it's non-nil, zero value otherwise.
+func (p *Project) GetOwnerURL() string {
+ if p == nil || p.OwnerURL == nil {
+ return ""
+ }
+ return *p.OwnerURL
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (p *Project) GetUpdatedAt() Timestamp {
+ if p == nil || p.UpdatedAt == nil {
+ return Timestamp{}
+ }
+ return *p.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (p *Project) GetURL() string {
+ if p == nil || p.URL == nil {
+ return ""
+ }
+ return *p.URL
+}
+
+// GetColumnURL returns the ColumnURL field if it's non-nil, zero value otherwise.
+func (p *ProjectCard) GetColumnURL() string {
+ if p == nil || p.ColumnURL == nil {
+ return ""
+ }
+ return *p.ColumnURL
+}
+
+// GetContentURL returns the ContentURL field if it's non-nil, zero value otherwise.
+func (p *ProjectCard) GetContentURL() string {
+ if p == nil || p.ContentURL == nil {
+ return ""
+ }
+ return *p.ContentURL
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (p *ProjectCard) GetCreatedAt() Timestamp {
+ if p == nil || p.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *p.CreatedAt
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (p *ProjectCard) GetID() int {
+ if p == nil || p.ID == nil {
+ return 0
+ }
+ return *p.ID
+}
+
+// GetNote returns the Note field if it's non-nil, zero value otherwise.
+func (p *ProjectCard) GetNote() string {
+ if p == nil || p.Note == nil {
+ return ""
+ }
+ return *p.Note
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (p *ProjectCard) GetUpdatedAt() Timestamp {
+ if p == nil || p.UpdatedAt == nil {
+ return Timestamp{}
+ }
+ return *p.UpdatedAt
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (p *ProjectCardEvent) GetAction() string {
+ if p == nil || p.Action == nil {
+ return ""
+ }
+ return *p.Action
+}
+
+// GetAfterID returns the AfterID field if it's non-nil, zero value otherwise.
+func (p *ProjectCardEvent) GetAfterID() int {
+ if p == nil || p.AfterID == nil {
+ return 0
+ }
+ return *p.AfterID
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (p *ProjectColumn) GetCreatedAt() Timestamp {
+ if p == nil || p.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *p.CreatedAt
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (p *ProjectColumn) GetID() int {
+ if p == nil || p.ID == nil {
+ return 0
+ }
+ return *p.ID
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (p *ProjectColumn) GetName() string {
+ if p == nil || p.Name == nil {
+ return ""
+ }
+ return *p.Name
+}
+
+// GetProjectURL returns the ProjectURL field if it's non-nil, zero value otherwise.
+func (p *ProjectColumn) GetProjectURL() string {
+ if p == nil || p.ProjectURL == nil {
+ return ""
+ }
+ return *p.ProjectURL
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (p *ProjectColumn) GetUpdatedAt() Timestamp {
+ if p == nil || p.UpdatedAt == nil {
+ return Timestamp{}
+ }
+ return *p.UpdatedAt
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (p *ProjectColumnEvent) GetAction() string {
+ if p == nil || p.Action == nil {
+ return ""
+ }
+ return *p.Action
+}
+
+// GetAfterID returns the AfterID field if it's non-nil, zero value otherwise.
+func (p *ProjectColumnEvent) GetAfterID() int {
+ if p == nil || p.AfterID == nil {
+ return 0
+ }
+ return *p.AfterID
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (p *ProjectEvent) GetAction() string {
+ if p == nil || p.Action == nil {
+ return ""
+ }
+ return *p.Action
+}
+
+// GetAdditions returns the Additions field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetAdditions() int {
+ if p == nil || p.Additions == nil {
+ return 0
+ }
+ return *p.Additions
+}
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetBody() string {
+ if p == nil || p.Body == nil {
+ return ""
+ }
+ return *p.Body
+}
+
+// GetChangedFiles returns the ChangedFiles field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetChangedFiles() int {
+ if p == nil || p.ChangedFiles == nil {
+ return 0
+ }
+ return *p.ChangedFiles
+}
+
+// GetClosedAt returns the ClosedAt field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetClosedAt() time.Time {
+ if p == nil || p.ClosedAt == nil {
+ return time.Time{}
+ }
+ return *p.ClosedAt
+}
+
+// GetComments returns the Comments field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetComments() int {
+ if p == nil || p.Comments == nil {
+ return 0
+ }
+ return *p.Comments
+}
+
+// GetCommits returns the Commits field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetCommits() int {
+ if p == nil || p.Commits == nil {
+ return 0
+ }
+ return *p.Commits
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetCreatedAt() time.Time {
+ if p == nil || p.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *p.CreatedAt
+}
+
+// GetDeletions returns the Deletions field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetDeletions() int {
+ if p == nil || p.Deletions == nil {
+ return 0
+ }
+ return *p.Deletions
+}
+
+// GetDiffURL returns the DiffURL field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetDiffURL() string {
+ if p == nil || p.DiffURL == nil {
+ return ""
+ }
+ return *p.DiffURL
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetHTMLURL() string {
+ if p == nil || p.HTMLURL == nil {
+ return ""
+ }
+ return *p.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetID() int {
+ if p == nil || p.ID == nil {
+ return 0
+ }
+ return *p.ID
+}
+
+// GetIssueURL returns the IssueURL field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetIssueURL() string {
+ if p == nil || p.IssueURL == nil {
+ return ""
+ }
+ return *p.IssueURL
+}
+
+// GetMaintainerCanModify returns the MaintainerCanModify field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetMaintainerCanModify() bool {
+ if p == nil || p.MaintainerCanModify == nil {
+ return false
+ }
+ return *p.MaintainerCanModify
+}
+
+// GetMergeable returns the Mergeable field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetMergeable() bool {
+ if p == nil || p.Mergeable == nil {
+ return false
+ }
+ return *p.Mergeable
+}
+
+// GetMerged returns the Merged field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetMerged() bool {
+ if p == nil || p.Merged == nil {
+ return false
+ }
+ return *p.Merged
+}
+
+// GetMergedAt returns the MergedAt field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetMergedAt() time.Time {
+ if p == nil || p.MergedAt == nil {
+ return time.Time{}
+ }
+ return *p.MergedAt
+}
+
+// GetNumber returns the Number field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetNumber() int {
+ if p == nil || p.Number == nil {
+ return 0
+ }
+ return *p.Number
+}
+
+// GetPatchURL returns the PatchURL field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetPatchURL() string {
+ if p == nil || p.PatchURL == nil {
+ return ""
+ }
+ return *p.PatchURL
+}
+
+// GetReviewCommentsURL returns the ReviewCommentsURL field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetReviewCommentsURL() string {
+ if p == nil || p.ReviewCommentsURL == nil {
+ return ""
+ }
+ return *p.ReviewCommentsURL
+}
+
+// GetReviewCommentURL returns the ReviewCommentURL field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetReviewCommentURL() string {
+ if p == nil || p.ReviewCommentURL == nil {
+ return ""
+ }
+ return *p.ReviewCommentURL
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetState() string {
+ if p == nil || p.State == nil {
+ return ""
+ }
+ return *p.State
+}
+
+// GetStatusesURL returns the StatusesURL field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetStatusesURL() string {
+ if p == nil || p.StatusesURL == nil {
+ return ""
+ }
+ return *p.StatusesURL
+}
+
+// GetTitle returns the Title field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetTitle() string {
+ if p == nil || p.Title == nil {
+ return ""
+ }
+ return *p.Title
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetUpdatedAt() time.Time {
+ if p == nil || p.UpdatedAt == nil {
+ return time.Time{}
+ }
+ return *p.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (p *PullRequest) GetURL() string {
+ if p == nil || p.URL == nil {
+ return ""
+ }
+ return *p.URL
+}
+
+// GetLabel returns the Label field if it's non-nil, zero value otherwise.
+func (p *PullRequestBranch) GetLabel() string {
+ if p == nil || p.Label == nil {
+ return ""
+ }
+ return *p.Label
+}
+
+// GetRef returns the Ref field if it's non-nil, zero value otherwise.
+func (p *PullRequestBranch) GetRef() string {
+ if p == nil || p.Ref == nil {
+ return ""
+ }
+ return *p.Ref
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (p *PullRequestBranch) GetSHA() string {
+ if p == nil || p.SHA == nil {
+ return ""
+ }
+ return *p.SHA
+}
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (p *PullRequestComment) GetBody() string {
+ if p == nil || p.Body == nil {
+ return ""
+ }
+ return *p.Body
+}
+
+// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise.
+func (p *PullRequestComment) GetCommitID() string {
+ if p == nil || p.CommitID == nil {
+ return ""
+ }
+ return *p.CommitID
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (p *PullRequestComment) GetCreatedAt() time.Time {
+ if p == nil || p.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *p.CreatedAt
+}
+
+// GetDiffHunk returns the DiffHunk field if it's non-nil, zero value otherwise.
+func (p *PullRequestComment) GetDiffHunk() string {
+ if p == nil || p.DiffHunk == nil {
+ return ""
+ }
+ return *p.DiffHunk
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (p *PullRequestComment) GetHTMLURL() string {
+ if p == nil || p.HTMLURL == nil {
+ return ""
+ }
+ return *p.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (p *PullRequestComment) GetID() int {
+ if p == nil || p.ID == nil {
+ return 0
+ }
+ return *p.ID
+}
+
+// GetInReplyTo returns the InReplyTo field if it's non-nil, zero value otherwise.
+func (p *PullRequestComment) GetInReplyTo() int {
+ if p == nil || p.InReplyTo == nil {
+ return 0
+ }
+ return *p.InReplyTo
+}
+
+// GetOriginalCommitID returns the OriginalCommitID field if it's non-nil, zero value otherwise.
+func (p *PullRequestComment) GetOriginalCommitID() string {
+ if p == nil || p.OriginalCommitID == nil {
+ return ""
+ }
+ return *p.OriginalCommitID
+}
+
+// GetOriginalPosition returns the OriginalPosition field if it's non-nil, zero value otherwise.
+func (p *PullRequestComment) GetOriginalPosition() int {
+ if p == nil || p.OriginalPosition == nil {
+ return 0
+ }
+ return *p.OriginalPosition
+}
+
+// GetPath returns the Path field if it's non-nil, zero value otherwise.
+func (p *PullRequestComment) GetPath() string {
+ if p == nil || p.Path == nil {
+ return ""
+ }
+ return *p.Path
+}
+
+// GetPosition returns the Position field if it's non-nil, zero value otherwise.
+func (p *PullRequestComment) GetPosition() int {
+ if p == nil || p.Position == nil {
+ return 0
+ }
+ return *p.Position
+}
+
+// GetPullRequestURL returns the PullRequestURL field if it's non-nil, zero value otherwise.
+func (p *PullRequestComment) GetPullRequestURL() string {
+ if p == nil || p.PullRequestURL == nil {
+ return ""
+ }
+ return *p.PullRequestURL
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (p *PullRequestComment) GetUpdatedAt() time.Time {
+ if p == nil || p.UpdatedAt == nil {
+ return time.Time{}
+ }
+ return *p.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (p *PullRequestComment) GetURL() string {
+ if p == nil || p.URL == nil {
+ return ""
+ }
+ return *p.URL
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (p *PullRequestEvent) GetAction() string {
+ if p == nil || p.Action == nil {
+ return ""
+ }
+ return *p.Action
+}
+
+// GetNumber returns the Number field if it's non-nil, zero value otherwise.
+func (p *PullRequestEvent) GetNumber() int {
+ if p == nil || p.Number == nil {
+ return 0
+ }
+ return *p.Number
+}
+
+// GetDiffURL returns the DiffURL field if it's non-nil, zero value otherwise.
+func (p *PullRequestLinks) GetDiffURL() string {
+ if p == nil || p.DiffURL == nil {
+ return ""
+ }
+ return *p.DiffURL
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (p *PullRequestLinks) GetHTMLURL() string {
+ if p == nil || p.HTMLURL == nil {
+ return ""
+ }
+ return *p.HTMLURL
+}
+
+// GetPatchURL returns the PatchURL field if it's non-nil, zero value otherwise.
+func (p *PullRequestLinks) GetPatchURL() string {
+ if p == nil || p.PatchURL == nil {
+ return ""
+ }
+ return *p.PatchURL
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (p *PullRequestLinks) GetURL() string {
+ if p == nil || p.URL == nil {
+ return ""
+ }
+ return *p.URL
+}
+
+// GetMerged returns the Merged field if it's non-nil, zero value otherwise.
+func (p *PullRequestMergeResult) GetMerged() bool {
+ if p == nil || p.Merged == nil {
+ return false
+ }
+ return *p.Merged
+}
+
+// GetMessage returns the Message field if it's non-nil, zero value otherwise.
+func (p *PullRequestMergeResult) GetMessage() string {
+ if p == nil || p.Message == nil {
+ return ""
+ }
+ return *p.Message
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (p *PullRequestMergeResult) GetSHA() string {
+ if p == nil || p.SHA == nil {
+ return ""
+ }
+ return *p.SHA
+}
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (p *PullRequestReview) GetBody() string {
+ if p == nil || p.Body == nil {
+ return ""
+ }
+ return *p.Body
+}
+
+// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise.
+func (p *PullRequestReview) GetCommitID() string {
+ if p == nil || p.CommitID == nil {
+ return ""
+ }
+ return *p.CommitID
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (p *PullRequestReview) GetHTMLURL() string {
+ if p == nil || p.HTMLURL == nil {
+ return ""
+ }
+ return *p.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (p *PullRequestReview) GetID() int {
+ if p == nil || p.ID == nil {
+ return 0
+ }
+ return *p.ID
+}
+
+// GetPullRequestURL returns the PullRequestURL field if it's non-nil, zero value otherwise.
+func (p *PullRequestReview) GetPullRequestURL() string {
+ if p == nil || p.PullRequestURL == nil {
+ return ""
+ }
+ return *p.PullRequestURL
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (p *PullRequestReview) GetState() string {
+ if p == nil || p.State == nil {
+ return ""
+ }
+ return *p.State
+}
+
+// GetSubmittedAt returns the SubmittedAt field if it's non-nil, zero value otherwise.
+func (p *PullRequestReview) GetSubmittedAt() time.Time {
+ if p == nil || p.SubmittedAt == nil {
+ return time.Time{}
+ }
+ return *p.SubmittedAt
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (p *PullRequestReviewCommentEvent) GetAction() string {
+ if p == nil || p.Action == nil {
+ return ""
+ }
+ return *p.Action
+}
+
+// GetMessage returns the Message field if it's non-nil, zero value otherwise.
+func (p *PullRequestReviewDismissalRequest) GetMessage() string {
+ if p == nil || p.Message == nil {
+ return ""
+ }
+ return *p.Message
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (p *PullRequestReviewEvent) GetAction() string {
+ if p == nil || p.Action == nil {
+ return ""
+ }
+ return *p.Action
+}
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (p *PullRequestReviewRequest) GetBody() string {
+ if p == nil || p.Body == nil {
+ return ""
+ }
+ return *p.Body
+}
+
+// GetEvent returns the Event field if it's non-nil, zero value otherwise.
+func (p *PullRequestReviewRequest) GetEvent() string {
+ if p == nil || p.Event == nil {
+ return ""
+ }
+ return *p.Event
+}
+
+// GetBase returns the Base field if it's non-nil, zero value otherwise.
+func (p *pullRequestUpdate) GetBase() string {
+ if p == nil || p.Base == nil {
+ return ""
+ }
+ return *p.Base
+}
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (p *pullRequestUpdate) GetBody() string {
+ if p == nil || p.Body == nil {
+ return ""
+ }
+ return *p.Body
+}
+
+// GetMaintainerCanModify returns the MaintainerCanModify field if it's non-nil, zero value otherwise.
+func (p *pullRequestUpdate) GetMaintainerCanModify() bool {
+ if p == nil || p.MaintainerCanModify == nil {
+ return false
+ }
+ return *p.MaintainerCanModify
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (p *pullRequestUpdate) GetState() string {
+ if p == nil || p.State == nil {
+ return ""
+ }
+ return *p.State
+}
+
+// GetTitle returns the Title field if it's non-nil, zero value otherwise.
+func (p *pullRequestUpdate) GetTitle() string {
+ if p == nil || p.Title == nil {
+ return ""
+ }
+ return *p.Title
+}
+
+// GetCommits returns the Commits field if it's non-nil, zero value otherwise.
+func (p *PunchCard) GetCommits() int {
+ if p == nil || p.Commits == nil {
+ return 0
+ }
+ return *p.Commits
+}
+
+// GetDay returns the Day field if it's non-nil, zero value otherwise.
+func (p *PunchCard) GetDay() int {
+ if p == nil || p.Day == nil {
+ return 0
+ }
+ return *p.Day
+}
+
+// GetHour returns the Hour field if it's non-nil, zero value otherwise.
+func (p *PunchCard) GetHour() int {
+ if p == nil || p.Hour == nil {
+ return 0
+ }
+ return *p.Hour
+}
+
+// GetAfter returns the After field if it's non-nil, zero value otherwise.
+func (p *PushEvent) GetAfter() string {
+ if p == nil || p.After == nil {
+ return ""
+ }
+ return *p.After
+}
+
+// GetBaseRef returns the BaseRef field if it's non-nil, zero value otherwise.
+func (p *PushEvent) GetBaseRef() string {
+ if p == nil || p.BaseRef == nil {
+ return ""
+ }
+ return *p.BaseRef
+}
+
+// GetBefore returns the Before field if it's non-nil, zero value otherwise.
+func (p *PushEvent) GetBefore() string {
+ if p == nil || p.Before == nil {
+ return ""
+ }
+ return *p.Before
+}
+
+// GetCompare returns the Compare field if it's non-nil, zero value otherwise.
+func (p *PushEvent) GetCompare() string {
+ if p == nil || p.Compare == nil {
+ return ""
+ }
+ return *p.Compare
+}
+
+// GetCreated returns the Created field if it's non-nil, zero value otherwise.
+func (p *PushEvent) GetCreated() bool {
+ if p == nil || p.Created == nil {
+ return false
+ }
+ return *p.Created
+}
+
+// GetDeleted returns the Deleted field if it's non-nil, zero value otherwise.
+func (p *PushEvent) GetDeleted() bool {
+ if p == nil || p.Deleted == nil {
+ return false
+ }
+ return *p.Deleted
+}
+
+// GetDistinctSize returns the DistinctSize field if it's non-nil, zero value otherwise.
+func (p *PushEvent) GetDistinctSize() int {
+ if p == nil || p.DistinctSize == nil {
+ return 0
+ }
+ return *p.DistinctSize
+}
+
+// GetForced returns the Forced field if it's non-nil, zero value otherwise.
+func (p *PushEvent) GetForced() bool {
+ if p == nil || p.Forced == nil {
+ return false
+ }
+ return *p.Forced
+}
+
+// GetHead returns the Head field if it's non-nil, zero value otherwise.
+func (p *PushEvent) GetHead() string {
+ if p == nil || p.Head == nil {
+ return ""
+ }
+ return *p.Head
+}
+
+// GetPushID returns the PushID field if it's non-nil, zero value otherwise.
+func (p *PushEvent) GetPushID() int {
+ if p == nil || p.PushID == nil {
+ return 0
+ }
+ return *p.PushID
+}
+
+// GetRef returns the Ref field if it's non-nil, zero value otherwise.
+func (p *PushEvent) GetRef() string {
+ if p == nil || p.Ref == nil {
+ return ""
+ }
+ return *p.Ref
+}
+
+// GetSize returns the Size field if it's non-nil, zero value otherwise.
+func (p *PushEvent) GetSize() int {
+ if p == nil || p.Size == nil {
+ return 0
+ }
+ return *p.Size
+}
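Editor's note (not part of the vendored patch): every accessor in this hunk follows the same generated pattern — guard against a nil receiver and a nil pointer field, then dereference. A minimal usage sketch, assuming these types live in go-github's `github` package (the import path is not visible in this hunk, so it is an assumption):

package main

import (
	"fmt"

	"github.com/google/go-github/github" // assumed import path for the vendored package
)

func main() {
	// Calling a generated accessor on a nil *PushEvent is safe:
	// the method checks the receiver before touching any field.
	var ev *github.PushEvent
	fmt.Println(ev.GetRef(), ev.GetSize()) // prints "" and 0, no panic

	// With pointer fields populated, the accessors simply dereference them.
	ref := "refs/heads/master" // hypothetical value, for illustration only
	ev = &github.PushEvent{Ref: &ref}
	fmt.Println(ev.GetRef()) // "refs/heads/master"
}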
+
+// GetDistinct returns the Distinct field if it's non-nil, zero value otherwise.
+func (p *PushEventCommit) GetDistinct() bool {
+ if p == nil || p.Distinct == nil {
+ return false
+ }
+ return *p.Distinct
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (p *PushEventCommit) GetID() string {
+ if p == nil || p.ID == nil {
+ return ""
+ }
+ return *p.ID
+}
+
+// GetMessage returns the Message field if it's non-nil, zero value otherwise.
+func (p *PushEventCommit) GetMessage() string {
+ if p == nil || p.Message == nil {
+ return ""
+ }
+ return *p.Message
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (p *PushEventCommit) GetSHA() string {
+ if p == nil || p.SHA == nil {
+ return ""
+ }
+ return *p.SHA
+}
+
+// GetTimestamp returns the Timestamp field if it's non-nil, zero value otherwise.
+func (p *PushEventCommit) GetTimestamp() Timestamp {
+ if p == nil || p.Timestamp == nil {
+ return Timestamp{}
+ }
+ return *p.Timestamp
+}
+
+// GetTreeID returns the TreeID field if it's non-nil, zero value otherwise.
+func (p *PushEventCommit) GetTreeID() string {
+ if p == nil || p.TreeID == nil {
+ return ""
+ }
+ return *p.TreeID
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (p *PushEventCommit) GetURL() string {
+ if p == nil || p.URL == nil {
+ return ""
+ }
+ return *p.URL
+}
+
+// GetEmail returns the Email field if it's non-nil, zero value otherwise.
+func (p *PushEventRepoOwner) GetEmail() string {
+ if p == nil || p.Email == nil {
+ return ""
+ }
+ return *p.Email
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (p *PushEventRepoOwner) GetName() string {
+ if p == nil || p.Name == nil {
+ return ""
+ }
+ return *p.Name
+}
+
+// GetArchiveURL returns the ArchiveURL field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetArchiveURL() string {
+ if p == nil || p.ArchiveURL == nil {
+ return ""
+ }
+ return *p.ArchiveURL
+}
+
+// GetCloneURL returns the CloneURL field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetCloneURL() string {
+ if p == nil || p.CloneURL == nil {
+ return ""
+ }
+ return *p.CloneURL
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetCreatedAt() Timestamp {
+ if p == nil || p.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *p.CreatedAt
+}
+
+// GetDefaultBranch returns the DefaultBranch field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetDefaultBranch() string {
+ if p == nil || p.DefaultBranch == nil {
+ return ""
+ }
+ return *p.DefaultBranch
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetDescription() string {
+ if p == nil || p.Description == nil {
+ return ""
+ }
+ return *p.Description
+}
+
+// GetFork returns the Fork field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetFork() bool {
+ if p == nil || p.Fork == nil {
+ return false
+ }
+ return *p.Fork
+}
+
+// GetForksCount returns the ForksCount field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetForksCount() int {
+ if p == nil || p.ForksCount == nil {
+ return 0
+ }
+ return *p.ForksCount
+}
+
+// GetFullName returns the FullName field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetFullName() string {
+ if p == nil || p.FullName == nil {
+ return ""
+ }
+ return *p.FullName
+}
+
+// GetGitURL returns the GitURL field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetGitURL() string {
+ if p == nil || p.GitURL == nil {
+ return ""
+ }
+ return *p.GitURL
+}
+
+// GetHasDownloads returns the HasDownloads field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetHasDownloads() bool {
+ if p == nil || p.HasDownloads == nil {
+ return false
+ }
+ return *p.HasDownloads
+}
+
+// GetHasIssues returns the HasIssues field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetHasIssues() bool {
+ if p == nil || p.HasIssues == nil {
+ return false
+ }
+ return *p.HasIssues
+}
+
+// GetHasPages returns the HasPages field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetHasPages() bool {
+ if p == nil || p.HasPages == nil {
+ return false
+ }
+ return *p.HasPages
+}
+
+// GetHasWiki returns the HasWiki field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetHasWiki() bool {
+ if p == nil || p.HasWiki == nil {
+ return false
+ }
+ return *p.HasWiki
+}
+
+// GetHomepage returns the Homepage field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetHomepage() string {
+ if p == nil || p.Homepage == nil {
+ return ""
+ }
+ return *p.Homepage
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetHTMLURL() string {
+ if p == nil || p.HTMLURL == nil {
+ return ""
+ }
+ return *p.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetID() int {
+ if p == nil || p.ID == nil {
+ return 0
+ }
+ return *p.ID
+}
+
+// GetLanguage returns the Language field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetLanguage() string {
+ if p == nil || p.Language == nil {
+ return ""
+ }
+ return *p.Language
+}
+
+// GetMasterBranch returns the MasterBranch field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetMasterBranch() string {
+ if p == nil || p.MasterBranch == nil {
+ return ""
+ }
+ return *p.MasterBranch
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetName() string {
+ if p == nil || p.Name == nil {
+ return ""
+ }
+ return *p.Name
+}
+
+// GetOpenIssuesCount returns the OpenIssuesCount field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetOpenIssuesCount() int {
+ if p == nil || p.OpenIssuesCount == nil {
+ return 0
+ }
+ return *p.OpenIssuesCount
+}
+
+// GetOrganization returns the Organization field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetOrganization() string {
+ if p == nil || p.Organization == nil {
+ return ""
+ }
+ return *p.Organization
+}
+
+// GetPrivate returns the Private field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetPrivate() bool {
+ if p == nil || p.Private == nil {
+ return false
+ }
+ return *p.Private
+}
+
+// GetPushedAt returns the PushedAt field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetPushedAt() Timestamp {
+ if p == nil || p.PushedAt == nil {
+ return Timestamp{}
+ }
+ return *p.PushedAt
+}
+
+// GetSize returns the Size field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetSize() int {
+ if p == nil || p.Size == nil {
+ return 0
+ }
+ return *p.Size
+}
+
+// GetSSHURL returns the SSHURL field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetSSHURL() string {
+ if p == nil || p.SSHURL == nil {
+ return ""
+ }
+ return *p.SSHURL
+}
+
+// GetStargazersCount returns the StargazersCount field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetStargazersCount() int {
+ if p == nil || p.StargazersCount == nil {
+ return 0
+ }
+ return *p.StargazersCount
+}
+
+// GetStatusesURL returns the StatusesURL field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetStatusesURL() string {
+ if p == nil || p.StatusesURL == nil {
+ return ""
+ }
+ return *p.StatusesURL
+}
+
+// GetSVNURL returns the SVNURL field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetSVNURL() string {
+ if p == nil || p.SVNURL == nil {
+ return ""
+ }
+ return *p.SVNURL
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetUpdatedAt() Timestamp {
+ if p == nil || p.UpdatedAt == nil {
+ return Timestamp{}
+ }
+ return *p.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetURL() string {
+ if p == nil || p.URL == nil {
+ return ""
+ }
+ return *p.URL
+}
+
+// GetWatchersCount returns the WatchersCount field if it's non-nil, zero value otherwise.
+func (p *PushEventRepository) GetWatchersCount() int {
+ if p == nil || p.WatchersCount == nil {
+ return 0
+ }
+ return *p.WatchersCount
+}
+
+// GetContent returns the Content field if it's non-nil, zero value otherwise.
+func (r *Reaction) GetContent() string {
+ if r == nil || r.Content == nil {
+ return ""
+ }
+ return *r.Content
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (r *Reaction) GetID() int {
+ if r == nil || r.ID == nil {
+ return 0
+ }
+ return *r.ID
+}
+
+// GetConfused returns the Confused field if it's non-nil, zero value otherwise.
+func (r *Reactions) GetConfused() int {
+ if r == nil || r.Confused == nil {
+ return 0
+ }
+ return *r.Confused
+}
+
+// GetHeart returns the Heart field if it's non-nil, zero value otherwise.
+func (r *Reactions) GetHeart() int {
+ if r == nil || r.Heart == nil {
+ return 0
+ }
+ return *r.Heart
+}
+
+// GetHooray returns the Hooray field if it's non-nil, zero value otherwise.
+func (r *Reactions) GetHooray() int {
+ if r == nil || r.Hooray == nil {
+ return 0
+ }
+ return *r.Hooray
+}
+
+// GetLaugh returns the Laugh field if it's non-nil, zero value otherwise.
+func (r *Reactions) GetLaugh() int {
+ if r == nil || r.Laugh == nil {
+ return 0
+ }
+ return *r.Laugh
+}
+
+// GetMinusOne returns the MinusOne field if it's non-nil, zero value otherwise.
+func (r *Reactions) GetMinusOne() int {
+ if r == nil || r.MinusOne == nil {
+ return 0
+ }
+ return *r.MinusOne
+}
+
+// GetPlusOne returns the PlusOne field if it's non-nil, zero value otherwise.
+func (r *Reactions) GetPlusOne() int {
+ if r == nil || r.PlusOne == nil {
+ return 0
+ }
+ return *r.PlusOne
+}
+
+// GetTotalCount returns the TotalCount field if it's non-nil, zero value otherwise.
+func (r *Reactions) GetTotalCount() int {
+ if r == nil || r.TotalCount == nil {
+ return 0
+ }
+ return *r.TotalCount
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (r *Reactions) GetURL() string {
+ if r == nil || r.URL == nil {
+ return ""
+ }
+ return *r.URL
+}
+
+// GetRef returns the Ref field if it's non-nil, zero value otherwise.
+func (r *Reference) GetRef() string {
+ if r == nil || r.Ref == nil {
+ return ""
+ }
+ return *r.Ref
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (r *Reference) GetURL() string {
+ if r == nil || r.URL == nil {
+ return ""
+ }
+ return *r.URL
+}
+
+// GetBrowserDownloadURL returns the BrowserDownloadURL field if it's non-nil, zero value otherwise.
+func (r *ReleaseAsset) GetBrowserDownloadURL() string {
+ if r == nil || r.BrowserDownloadURL == nil {
+ return ""
+ }
+ return *r.BrowserDownloadURL
+}
+
+// GetContentType returns the ContentType field if it's non-nil, zero value otherwise.
+func (r *ReleaseAsset) GetContentType() string {
+ if r == nil || r.ContentType == nil {
+ return ""
+ }
+ return *r.ContentType
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (r *ReleaseAsset) GetCreatedAt() Timestamp {
+ if r == nil || r.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *r.CreatedAt
+}
+
+// GetDownloadCount returns the DownloadCount field if it's non-nil, zero value otherwise.
+func (r *ReleaseAsset) GetDownloadCount() int {
+ if r == nil || r.DownloadCount == nil {
+ return 0
+ }
+ return *r.DownloadCount
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (r *ReleaseAsset) GetID() int {
+ if r == nil || r.ID == nil {
+ return 0
+ }
+ return *r.ID
+}
+
+// GetLabel returns the Label field if it's non-nil, zero value otherwise.
+func (r *ReleaseAsset) GetLabel() string {
+ if r == nil || r.Label == nil {
+ return ""
+ }
+ return *r.Label
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (r *ReleaseAsset) GetName() string {
+ if r == nil || r.Name == nil {
+ return ""
+ }
+ return *r.Name
+}
+
+// GetSize returns the Size field if it's non-nil, zero value otherwise.
+func (r *ReleaseAsset) GetSize() int {
+ if r == nil || r.Size == nil {
+ return 0
+ }
+ return *r.Size
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (r *ReleaseAsset) GetState() string {
+ if r == nil || r.State == nil {
+ return ""
+ }
+ return *r.State
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (r *ReleaseAsset) GetUpdatedAt() Timestamp {
+ if r == nil || r.UpdatedAt == nil {
+ return Timestamp{}
+ }
+ return *r.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (r *ReleaseAsset) GetURL() string {
+ if r == nil || r.URL == nil {
+ return ""
+ }
+ return *r.URL
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (r *ReleaseEvent) GetAction() string {
+ if r == nil || r.Action == nil {
+ return ""
+ }
+ return *r.Action
+}
+
+// GetFrom returns the From field if it's non-nil, zero value otherwise.
+func (r *Rename) GetFrom() string {
+ if r == nil || r.From == nil {
+ return ""
+ }
+ return *r.From
+}
+
+// GetTo returns the To field if it's non-nil, zero value otherwise.
+func (r *Rename) GetTo() string {
+ if r == nil || r.To == nil {
+ return ""
+ }
+ return *r.To
+}
+
+// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise.
+func (r *RepositoriesSearchResult) GetIncompleteResults() bool {
+ if r == nil || r.IncompleteResults == nil {
+ return false
+ }
+ return *r.IncompleteResults
+}
+
+// GetTotal returns the Total field if it's non-nil, zero value otherwise.
+func (r *RepositoriesSearchResult) GetTotal() int {
+ if r == nil || r.Total == nil {
+ return 0
+ }
+ return *r.Total
+}
+
+// GetAllowMergeCommit returns the AllowMergeCommit field if it's non-nil, zero value otherwise.
+func (r *Repository) GetAllowMergeCommit() bool {
+ if r == nil || r.AllowMergeCommit == nil {
+ return false
+ }
+ return *r.AllowMergeCommit
+}
+
+// GetAllowRebaseMerge returns the AllowRebaseMerge field if it's non-nil, zero value otherwise.
+func (r *Repository) GetAllowRebaseMerge() bool {
+ if r == nil || r.AllowRebaseMerge == nil {
+ return false
+ }
+ return *r.AllowRebaseMerge
+}
+
+// GetAllowSquashMerge returns the AllowSquashMerge field if it's non-nil, zero value otherwise.
+func (r *Repository) GetAllowSquashMerge() bool {
+ if r == nil || r.AllowSquashMerge == nil {
+ return false
+ }
+ return *r.AllowSquashMerge
+}
+
+// GetArchiveURL returns the ArchiveURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetArchiveURL() string {
+ if r == nil || r.ArchiveURL == nil {
+ return ""
+ }
+ return *r.ArchiveURL
+}
+
+// GetAssigneesURL returns the AssigneesURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetAssigneesURL() string {
+ if r == nil || r.AssigneesURL == nil {
+ return ""
+ }
+ return *r.AssigneesURL
+}
+
+// GetAutoInit returns the AutoInit field if it's non-nil, zero value otherwise.
+func (r *Repository) GetAutoInit() bool {
+ if r == nil || r.AutoInit == nil {
+ return false
+ }
+ return *r.AutoInit
+}
+
+// GetBlobsURL returns the BlobsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetBlobsURL() string {
+ if r == nil || r.BlobsURL == nil {
+ return ""
+ }
+ return *r.BlobsURL
+}
+
+// GetBranchesURL returns the BranchesURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetBranchesURL() string {
+ if r == nil || r.BranchesURL == nil {
+ return ""
+ }
+ return *r.BranchesURL
+}
+
+// GetCloneURL returns the CloneURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetCloneURL() string {
+ if r == nil || r.CloneURL == nil {
+ return ""
+ }
+ return *r.CloneURL
+}
+
+// GetCollaboratorsURL returns the CollaboratorsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetCollaboratorsURL() string {
+ if r == nil || r.CollaboratorsURL == nil {
+ return ""
+ }
+ return *r.CollaboratorsURL
+}
+
+// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetCommentsURL() string {
+ if r == nil || r.CommentsURL == nil {
+ return ""
+ }
+ return *r.CommentsURL
+}
+
+// GetCommitsURL returns the CommitsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetCommitsURL() string {
+ if r == nil || r.CommitsURL == nil {
+ return ""
+ }
+ return *r.CommitsURL
+}
+
+// GetCompareURL returns the CompareURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetCompareURL() string {
+ if r == nil || r.CompareURL == nil {
+ return ""
+ }
+ return *r.CompareURL
+}
+
+// GetContentsURL returns the ContentsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetContentsURL() string {
+ if r == nil || r.ContentsURL == nil {
+ return ""
+ }
+ return *r.ContentsURL
+}
+
+// GetContributorsURL returns the ContributorsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetContributorsURL() string {
+ if r == nil || r.ContributorsURL == nil {
+ return ""
+ }
+ return *r.ContributorsURL
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (r *Repository) GetCreatedAt() Timestamp {
+ if r == nil || r.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *r.CreatedAt
+}
+
+// GetDefaultBranch returns the DefaultBranch field if it's non-nil, zero value otherwise.
+func (r *Repository) GetDefaultBranch() string {
+ if r == nil || r.DefaultBranch == nil {
+ return ""
+ }
+ return *r.DefaultBranch
+}
+
+// GetDeploymentsURL returns the DeploymentsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetDeploymentsURL() string {
+ if r == nil || r.DeploymentsURL == nil {
+ return ""
+ }
+ return *r.DeploymentsURL
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (r *Repository) GetDescription() string {
+ if r == nil || r.Description == nil {
+ return ""
+ }
+ return *r.Description
+}
+
+// GetDownloadsURL returns the DownloadsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetDownloadsURL() string {
+ if r == nil || r.DownloadsURL == nil {
+ return ""
+ }
+ return *r.DownloadsURL
+}
+
+// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetEventsURL() string {
+ if r == nil || r.EventsURL == nil {
+ return ""
+ }
+ return *r.EventsURL
+}
+
+// GetFork returns the Fork field if it's non-nil, zero value otherwise.
+func (r *Repository) GetFork() bool {
+ if r == nil || r.Fork == nil {
+ return false
+ }
+ return *r.Fork
+}
+
+// GetForksCount returns the ForksCount field if it's non-nil, zero value otherwise.
+func (r *Repository) GetForksCount() int {
+ if r == nil || r.ForksCount == nil {
+ return 0
+ }
+ return *r.ForksCount
+}
+
+// GetForksURL returns the ForksURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetForksURL() string {
+ if r == nil || r.ForksURL == nil {
+ return ""
+ }
+ return *r.ForksURL
+}
+
+// GetFullName returns the FullName field if it's non-nil, zero value otherwise.
+func (r *Repository) GetFullName() string {
+ if r == nil || r.FullName == nil {
+ return ""
+ }
+ return *r.FullName
+}
+
+// GetGitCommitsURL returns the GitCommitsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetGitCommitsURL() string {
+ if r == nil || r.GitCommitsURL == nil {
+ return ""
+ }
+ return *r.GitCommitsURL
+}
+
+// GetGitignoreTemplate returns the GitignoreTemplate field if it's non-nil, zero value otherwise.
+func (r *Repository) GetGitignoreTemplate() string {
+ if r == nil || r.GitignoreTemplate == nil {
+ return ""
+ }
+ return *r.GitignoreTemplate
+}
+
+// GetGitRefsURL returns the GitRefsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetGitRefsURL() string {
+ if r == nil || r.GitRefsURL == nil {
+ return ""
+ }
+ return *r.GitRefsURL
+}
+
+// GetGitTagsURL returns the GitTagsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetGitTagsURL() string {
+ if r == nil || r.GitTagsURL == nil {
+ return ""
+ }
+ return *r.GitTagsURL
+}
+
+// GetGitURL returns the GitURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetGitURL() string {
+ if r == nil || r.GitURL == nil {
+ return ""
+ }
+ return *r.GitURL
+}
+
+// GetHasDownloads returns the HasDownloads field if it's non-nil, zero value otherwise.
+func (r *Repository) GetHasDownloads() bool {
+ if r == nil || r.HasDownloads == nil {
+ return false
+ }
+ return *r.HasDownloads
+}
+
+// GetHasIssues returns the HasIssues field if it's non-nil, zero value otherwise.
+func (r *Repository) GetHasIssues() bool {
+ if r == nil || r.HasIssues == nil {
+ return false
+ }
+ return *r.HasIssues
+}
+
+// GetHasPages returns the HasPages field if it's non-nil, zero value otherwise.
+func (r *Repository) GetHasPages() bool {
+ if r == nil || r.HasPages == nil {
+ return false
+ }
+ return *r.HasPages
+}
+
+// GetHasWiki returns the HasWiki field if it's non-nil, zero value otherwise.
+func (r *Repository) GetHasWiki() bool {
+ if r == nil || r.HasWiki == nil {
+ return false
+ }
+ return *r.HasWiki
+}
+
+// GetHomepage returns the Homepage field if it's non-nil, zero value otherwise.
+func (r *Repository) GetHomepage() string {
+ if r == nil || r.Homepage == nil {
+ return ""
+ }
+ return *r.Homepage
+}
+
+// GetHooksURL returns the HooksURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetHooksURL() string {
+ if r == nil || r.HooksURL == nil {
+ return ""
+ }
+ return *r.HooksURL
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetHTMLURL() string {
+ if r == nil || r.HTMLURL == nil {
+ return ""
+ }
+ return *r.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (r *Repository) GetID() int {
+ if r == nil || r.ID == nil {
+ return 0
+ }
+ return *r.ID
+}
+
+// GetIssueCommentURL returns the IssueCommentURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetIssueCommentURL() string {
+ if r == nil || r.IssueCommentURL == nil {
+ return ""
+ }
+ return *r.IssueCommentURL
+}
+
+// GetIssueEventsURL returns the IssueEventsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetIssueEventsURL() string {
+ if r == nil || r.IssueEventsURL == nil {
+ return ""
+ }
+ return *r.IssueEventsURL
+}
+
+// GetIssuesURL returns the IssuesURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetIssuesURL() string {
+ if r == nil || r.IssuesURL == nil {
+ return ""
+ }
+ return *r.IssuesURL
+}
+
+// GetKeysURL returns the KeysURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetKeysURL() string {
+ if r == nil || r.KeysURL == nil {
+ return ""
+ }
+ return *r.KeysURL
+}
+
+// GetLabelsURL returns the LabelsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetLabelsURL() string {
+ if r == nil || r.LabelsURL == nil {
+ return ""
+ }
+ return *r.LabelsURL
+}
+
+// GetLanguage returns the Language field if it's non-nil, zero value otherwise.
+func (r *Repository) GetLanguage() string {
+ if r == nil || r.Language == nil {
+ return ""
+ }
+ return *r.Language
+}
+
+// GetLanguagesURL returns the LanguagesURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetLanguagesURL() string {
+ if r == nil || r.LanguagesURL == nil {
+ return ""
+ }
+ return *r.LanguagesURL
+}
+
+// GetLicenseTemplate returns the LicenseTemplate field if it's non-nil, zero value otherwise.
+func (r *Repository) GetLicenseTemplate() string {
+ if r == nil || r.LicenseTemplate == nil {
+ return ""
+ }
+ return *r.LicenseTemplate
+}
+
+// GetMasterBranch returns the MasterBranch field if it's non-nil, zero value otherwise.
+func (r *Repository) GetMasterBranch() string {
+ if r == nil || r.MasterBranch == nil {
+ return ""
+ }
+ return *r.MasterBranch
+}
+
+// GetMergesURL returns the MergesURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetMergesURL() string {
+ if r == nil || r.MergesURL == nil {
+ return ""
+ }
+ return *r.MergesURL
+}
+
+// GetMilestonesURL returns the MilestonesURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetMilestonesURL() string {
+ if r == nil || r.MilestonesURL == nil {
+ return ""
+ }
+ return *r.MilestonesURL
+}
+
+// GetMirrorURL returns the MirrorURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetMirrorURL() string {
+ if r == nil || r.MirrorURL == nil {
+ return ""
+ }
+ return *r.MirrorURL
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (r *Repository) GetName() string {
+ if r == nil || r.Name == nil {
+ return ""
+ }
+ return *r.Name
+}
+
+// GetNetworkCount returns the NetworkCount field if it's non-nil, zero value otherwise.
+func (r *Repository) GetNetworkCount() int {
+ if r == nil || r.NetworkCount == nil {
+ return 0
+ }
+ return *r.NetworkCount
+}
+
+// GetNotificationsURL returns the NotificationsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetNotificationsURL() string {
+ if r == nil || r.NotificationsURL == nil {
+ return ""
+ }
+ return *r.NotificationsURL
+}
+
+// GetOpenIssuesCount returns the OpenIssuesCount field if it's non-nil, zero value otherwise.
+func (r *Repository) GetOpenIssuesCount() int {
+ if r == nil || r.OpenIssuesCount == nil {
+ return 0
+ }
+ return *r.OpenIssuesCount
+}
+
+// GetPermissions returns the Permissions field if it's non-nil, zero value otherwise.
+func (r *Repository) GetPermissions() map[string]bool {
+ if r == nil || r.Permissions == nil {
+ return map[string]bool{}
+ }
+ return *r.Permissions
+}
+
+// GetPrivate returns the Private field if it's non-nil, zero value otherwise.
+func (r *Repository) GetPrivate() bool {
+ if r == nil || r.Private == nil {
+ return false
+ }
+ return *r.Private
+}
+
+// GetPullsURL returns the PullsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetPullsURL() string {
+ if r == nil || r.PullsURL == nil {
+ return ""
+ }
+ return *r.PullsURL
+}
+
+// GetPushedAt returns the PushedAt field if it's non-nil, zero value otherwise.
+func (r *Repository) GetPushedAt() Timestamp {
+ if r == nil || r.PushedAt == nil {
+ return Timestamp{}
+ }
+ return *r.PushedAt
+}
+
+// GetReleasesURL returns the ReleasesURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetReleasesURL() string {
+ if r == nil || r.ReleasesURL == nil {
+ return ""
+ }
+ return *r.ReleasesURL
+}
+
+// GetSize returns the Size field if it's non-nil, zero value otherwise.
+func (r *Repository) GetSize() int {
+ if r == nil || r.Size == nil {
+ return 0
+ }
+ return *r.Size
+}
+
+// GetSSHURL returns the SSHURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetSSHURL() string {
+ if r == nil || r.SSHURL == nil {
+ return ""
+ }
+ return *r.SSHURL
+}
+
+// GetStargazersCount returns the StargazersCount field if it's non-nil, zero value otherwise.
+func (r *Repository) GetStargazersCount() int {
+ if r == nil || r.StargazersCount == nil {
+ return 0
+ }
+ return *r.StargazersCount
+}
+
+// GetStargazersURL returns the StargazersURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetStargazersURL() string {
+ if r == nil || r.StargazersURL == nil {
+ return ""
+ }
+ return *r.StargazersURL
+}
+
+// GetStatusesURL returns the StatusesURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetStatusesURL() string {
+ if r == nil || r.StatusesURL == nil {
+ return ""
+ }
+ return *r.StatusesURL
+}
+
+// GetSubscribersCount returns the SubscribersCount field if it's non-nil, zero value otherwise.
+func (r *Repository) GetSubscribersCount() int {
+ if r == nil || r.SubscribersCount == nil {
+ return 0
+ }
+ return *r.SubscribersCount
+}
+
+// GetSubscribersURL returns the SubscribersURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetSubscribersURL() string {
+ if r == nil || r.SubscribersURL == nil {
+ return ""
+ }
+ return *r.SubscribersURL
+}
+
+// GetSubscriptionURL returns the SubscriptionURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetSubscriptionURL() string {
+ if r == nil || r.SubscriptionURL == nil {
+ return ""
+ }
+ return *r.SubscriptionURL
+}
+
+// GetSVNURL returns the SVNURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetSVNURL() string {
+ if r == nil || r.SVNURL == nil {
+ return ""
+ }
+ return *r.SVNURL
+}
+
+// GetTagsURL returns the TagsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetTagsURL() string {
+ if r == nil || r.TagsURL == nil {
+ return ""
+ }
+ return *r.TagsURL
+}
+
+// GetTeamID returns the TeamID field if it's non-nil, zero value otherwise.
+func (r *Repository) GetTeamID() int {
+ if r == nil || r.TeamID == nil {
+ return 0
+ }
+ return *r.TeamID
+}
+
+// GetTeamsURL returns the TeamsURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetTeamsURL() string {
+ if r == nil || r.TeamsURL == nil {
+ return ""
+ }
+ return *r.TeamsURL
+}
+
+// GetTreesURL returns the TreesURL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetTreesURL() string {
+ if r == nil || r.TreesURL == nil {
+ return ""
+ }
+ return *r.TreesURL
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (r *Repository) GetUpdatedAt() Timestamp {
+ if r == nil || r.UpdatedAt == nil {
+ return Timestamp{}
+ }
+ return *r.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (r *Repository) GetURL() string {
+ if r == nil || r.URL == nil {
+ return ""
+ }
+ return *r.URL
+}
+
+// GetWatchersCount returns the WatchersCount field if it's non-nil, zero value otherwise.
+func (r *Repository) GetWatchersCount() int {
+ if r == nil || r.WatchersCount == nil {
+ return 0
+ }
+ return *r.WatchersCount
+}
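Editor's note (not part of the vendored patch): because every field on these repository and event types is a pointer, consuming code would otherwise need an explicit nil check per field. A short sketch of how the generated getters remove that burden, again assuming the go-github `github` package; the repository values below are hypothetical:

package main

import (
	"fmt"

	"github.com/google/go-github/github" // assumed import path
)

// describe builds a one-line summary of a repository that may have
// missing (nil) fields, relying on the generated zero-value fallbacks.
func describe(r *github.Repository) string {
	return fmt.Sprintf("%s (%d stars, fork=%v)",
		r.GetFullName(), r.GetStargazersCount(), r.GetFork())
}

func main() {
	name := "octocat/Hello-World" // hypothetical repository name
	stars := 42
	r := &github.Repository{FullName: &name, StargazersCount: &stars}
	fmt.Println(describe(r))   // "octocat/Hello-World (42 stars, fork=false)"
	fmt.Println(describe(nil)) // " (0 stars, fork=false)" — still no panic
}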
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (r *RepositoryComment) GetBody() string {
+ if r == nil || r.Body == nil {
+ return ""
+ }
+ return *r.Body
+}
+
+// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise.
+func (r *RepositoryComment) GetCommitID() string {
+ if r == nil || r.CommitID == nil {
+ return ""
+ }
+ return *r.CommitID
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (r *RepositoryComment) GetCreatedAt() time.Time {
+ if r == nil || r.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *r.CreatedAt
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryComment) GetHTMLURL() string {
+ if r == nil || r.HTMLURL == nil {
+ return ""
+ }
+ return *r.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (r *RepositoryComment) GetID() int {
+ if r == nil || r.ID == nil {
+ return 0
+ }
+ return *r.ID
+}
+
+// GetPath returns the Path field if it's non-nil, zero value otherwise.
+func (r *RepositoryComment) GetPath() string {
+ if r == nil || r.Path == nil {
+ return ""
+ }
+ return *r.Path
+}
+
+// GetPosition returns the Position field if it's non-nil, zero value otherwise.
+func (r *RepositoryComment) GetPosition() int {
+ if r == nil || r.Position == nil {
+ return 0
+ }
+ return *r.Position
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (r *RepositoryComment) GetUpdatedAt() time.Time {
+ if r == nil || r.UpdatedAt == nil {
+ return time.Time{}
+ }
+ return *r.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (r *RepositoryComment) GetURL() string {
+ if r == nil || r.URL == nil {
+ return ""
+ }
+ return *r.URL
+}
+
+// GetCommentsURL returns the CommentsURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryCommit) GetCommentsURL() string {
+ if r == nil || r.CommentsURL == nil {
+ return ""
+ }
+ return *r.CommentsURL
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryCommit) GetHTMLURL() string {
+ if r == nil || r.HTMLURL == nil {
+ return ""
+ }
+ return *r.HTMLURL
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (r *RepositoryCommit) GetSHA() string {
+ if r == nil || r.SHA == nil {
+ return ""
+ }
+ return *r.SHA
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (r *RepositoryCommit) GetURL() string {
+ if r == nil || r.URL == nil {
+ return ""
+ }
+ return *r.URL
+}
+
+// GetDownloadURL returns the DownloadURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryContent) GetDownloadURL() string {
+ if r == nil || r.DownloadURL == nil {
+ return ""
+ }
+ return *r.DownloadURL
+}
+
+// GetEncoding returns the Encoding field if it's non-nil, zero value otherwise.
+func (r *RepositoryContent) GetEncoding() string {
+ if r == nil || r.Encoding == nil {
+ return ""
+ }
+ return *r.Encoding
+}
+
+// GetGitURL returns the GitURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryContent) GetGitURL() string {
+ if r == nil || r.GitURL == nil {
+ return ""
+ }
+ return *r.GitURL
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryContent) GetHTMLURL() string {
+ if r == nil || r.HTMLURL == nil {
+ return ""
+ }
+ return *r.HTMLURL
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (r *RepositoryContent) GetName() string {
+ if r == nil || r.Name == nil {
+ return ""
+ }
+ return *r.Name
+}
+
+// GetPath returns the Path field if it's non-nil, zero value otherwise.
+func (r *RepositoryContent) GetPath() string {
+ if r == nil || r.Path == nil {
+ return ""
+ }
+ return *r.Path
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (r *RepositoryContent) GetSHA() string {
+ if r == nil || r.SHA == nil {
+ return ""
+ }
+ return *r.SHA
+}
+
+// GetSize returns the Size field if it's non-nil, zero value otherwise.
+func (r *RepositoryContent) GetSize() int {
+ if r == nil || r.Size == nil {
+ return 0
+ }
+ return *r.Size
+}
+
+// GetType returns the Type field if it's non-nil, zero value otherwise.
+func (r *RepositoryContent) GetType() string {
+ if r == nil || r.Type == nil {
+ return ""
+ }
+ return *r.Type
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (r *RepositoryContent) GetURL() string {
+ if r == nil || r.URL == nil {
+ return ""
+ }
+ return *r.URL
+}
+
+// GetBranch returns the Branch field if it's non-nil, zero value otherwise.
+func (r *RepositoryContentFileOptions) GetBranch() string {
+ if r == nil || r.Branch == nil {
+ return ""
+ }
+ return *r.Branch
+}
+
+// GetMessage returns the Message field if it's non-nil, zero value otherwise.
+func (r *RepositoryContentFileOptions) GetMessage() string {
+ if r == nil || r.Message == nil {
+ return ""
+ }
+ return *r.Message
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (r *RepositoryContentFileOptions) GetSHA() string {
+ if r == nil || r.SHA == nil {
+ return ""
+ }
+ return *r.SHA
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (r *RepositoryEvent) GetAction() string {
+ if r == nil || r.Action == nil {
+ return ""
+ }
+ return *r.Action
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (r *RepositoryInvitation) GetCreatedAt() Timestamp {
+ if r == nil || r.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *r.CreatedAt
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryInvitation) GetHTMLURL() string {
+ if r == nil || r.HTMLURL == nil {
+ return ""
+ }
+ return *r.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (r *RepositoryInvitation) GetID() int {
+ if r == nil || r.ID == nil {
+ return 0
+ }
+ return *r.ID
+}
+
+// GetPermissions returns the Permissions field if it's non-nil, zero value otherwise.
+func (r *RepositoryInvitation) GetPermissions() string {
+ if r == nil || r.Permissions == nil {
+ return ""
+ }
+ return *r.Permissions
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (r *RepositoryInvitation) GetURL() string {
+ if r == nil || r.URL == nil {
+ return ""
+ }
+ return *r.URL
+}
+
+// GetContent returns the Content field if it's non-nil, zero value otherwise.
+func (r *RepositoryLicense) GetContent() string {
+ if r == nil || r.Content == nil {
+ return ""
+ }
+ return *r.Content
+}
+
+// GetDownloadURL returns the DownloadURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryLicense) GetDownloadURL() string {
+ if r == nil || r.DownloadURL == nil {
+ return ""
+ }
+ return *r.DownloadURL
+}
+
+// GetEncoding returns the Encoding field if it's non-nil, zero value otherwise.
+func (r *RepositoryLicense) GetEncoding() string {
+ if r == nil || r.Encoding == nil {
+ return ""
+ }
+ return *r.Encoding
+}
+
+// GetGitURL returns the GitURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryLicense) GetGitURL() string {
+ if r == nil || r.GitURL == nil {
+ return ""
+ }
+ return *r.GitURL
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryLicense) GetHTMLURL() string {
+ if r == nil || r.HTMLURL == nil {
+ return ""
+ }
+ return *r.HTMLURL
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (r *RepositoryLicense) GetName() string {
+ if r == nil || r.Name == nil {
+ return ""
+ }
+ return *r.Name
+}
+
+// GetPath returns the Path field if it's non-nil, zero value otherwise.
+func (r *RepositoryLicense) GetPath() string {
+ if r == nil || r.Path == nil {
+ return ""
+ }
+ return *r.Path
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (r *RepositoryLicense) GetSHA() string {
+ if r == nil || r.SHA == nil {
+ return ""
+ }
+ return *r.SHA
+}
+
+// GetSize returns the Size field if it's non-nil, zero value otherwise.
+func (r *RepositoryLicense) GetSize() int {
+ if r == nil || r.Size == nil {
+ return 0
+ }
+ return *r.Size
+}
+
+// GetType returns the Type field if it's non-nil, zero value otherwise.
+func (r *RepositoryLicense) GetType() string {
+ if r == nil || r.Type == nil {
+ return ""
+ }
+ return *r.Type
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (r *RepositoryLicense) GetURL() string {
+ if r == nil || r.URL == nil {
+ return ""
+ }
+ return *r.URL
+}
+
+// GetBase returns the Base field if it's non-nil, zero value otherwise.
+func (r *RepositoryMergeRequest) GetBase() string {
+ if r == nil || r.Base == nil {
+ return ""
+ }
+ return *r.Base
+}
+
+// GetCommitMessage returns the CommitMessage field if it's non-nil, zero value otherwise.
+func (r *RepositoryMergeRequest) GetCommitMessage() string {
+ if r == nil || r.CommitMessage == nil {
+ return ""
+ }
+ return *r.CommitMessage
+}
+
+// GetHead returns the Head field if it's non-nil, zero value otherwise.
+func (r *RepositoryMergeRequest) GetHead() string {
+ if r == nil || r.Head == nil {
+ return ""
+ }
+ return *r.Head
+}
+
+// GetPermission returns the Permission field if it's non-nil, zero value otherwise.
+func (r *RepositoryPermissionLevel) GetPermission() string {
+ if r == nil || r.Permission == nil {
+ return ""
+ }
+ return *r.Permission
+}
+
+// GetAssetsURL returns the AssetsURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetAssetsURL() string {
+ if r == nil || r.AssetsURL == nil {
+ return ""
+ }
+ return *r.AssetsURL
+}
+
+// GetBody returns the Body field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetBody() string {
+ if r == nil || r.Body == nil {
+ return ""
+ }
+ return *r.Body
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetCreatedAt() Timestamp {
+ if r == nil || r.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *r.CreatedAt
+}
+
+// GetDraft returns the Draft field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetDraft() bool {
+ if r == nil || r.Draft == nil {
+ return false
+ }
+ return *r.Draft
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetHTMLURL() string {
+ if r == nil || r.HTMLURL == nil {
+ return ""
+ }
+ return *r.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetID() int {
+ if r == nil || r.ID == nil {
+ return 0
+ }
+ return *r.ID
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetName() string {
+ if r == nil || r.Name == nil {
+ return ""
+ }
+ return *r.Name
+}
+
+// GetPrerelease returns the Prerelease field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetPrerelease() bool {
+ if r == nil || r.Prerelease == nil {
+ return false
+ }
+ return *r.Prerelease
+}
+
+// GetPublishedAt returns the PublishedAt field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetPublishedAt() Timestamp {
+ if r == nil || r.PublishedAt == nil {
+ return Timestamp{}
+ }
+ return *r.PublishedAt
+}
+
+// GetTagName returns the TagName field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetTagName() string {
+ if r == nil || r.TagName == nil {
+ return ""
+ }
+ return *r.TagName
+}
+
+// GetTarballURL returns the TarballURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetTarballURL() string {
+ if r == nil || r.TarballURL == nil {
+ return ""
+ }
+ return *r.TarballURL
+}
+
+// GetTargetCommitish returns the TargetCommitish field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetTargetCommitish() string {
+ if r == nil || r.TargetCommitish == nil {
+ return ""
+ }
+ return *r.TargetCommitish
+}
+
+// GetUploadURL returns the UploadURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetUploadURL() string {
+ if r == nil || r.UploadURL == nil {
+ return ""
+ }
+ return *r.UploadURL
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetURL() string {
+ if r == nil || r.URL == nil {
+ return ""
+ }
+ return *r.URL
+}
+
+// GetZipballURL returns the ZipballURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryRelease) GetZipballURL() string {
+ if r == nil || r.ZipballURL == nil {
+ return ""
+ }
+ return *r.ZipballURL
+}
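Editor's note (not part of the vendored patch): the trade-off of these getters is that a missing field and a zero-valued field look the same through them. A brief sketch, under the same package-path assumption, of checking the pointer directly when that distinction matters:

package main

import (
	"fmt"

	"github.com/google/go-github/github" // assumed import path
)

func main() {
	// GetDraft returns false both when Draft was absent from the API
	// response and when it was explicitly false; inspect the pointer
	// itself to tell the two cases apart.
	rel := &github.RepositoryRelease{}
	if rel.Draft == nil {
		fmt.Println("draft flag not present")
	}
	fmt.Println(rel.GetDraft()) // false either way
}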
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (r *RepositoryTag) GetName() string {
+ if r == nil || r.Name == nil {
+ return ""
+ }
+ return *r.Name
+}
+
+// GetTarballURL returns the TarballURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryTag) GetTarballURL() string {
+ if r == nil || r.TarballURL == nil {
+ return ""
+ }
+ return *r.TarballURL
+}
+
+// GetZipballURL returns the ZipballURL field if it's non-nil, zero value otherwise.
+func (r *RepositoryTag) GetZipballURL() string {
+ if r == nil || r.ZipballURL == nil {
+ return ""
+ }
+ return *r.ZipballURL
+}
+
+// GetContext returns the Context field if it's non-nil, zero value otherwise.
+func (r *RepoStatus) GetContext() string {
+ if r == nil || r.Context == nil {
+ return ""
+ }
+ return *r.Context
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (r *RepoStatus) GetCreatedAt() time.Time {
+ if r == nil || r.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *r.CreatedAt
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (r *RepoStatus) GetDescription() string {
+ if r == nil || r.Description == nil {
+ return ""
+ }
+ return *r.Description
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (r *RepoStatus) GetID() int {
+ if r == nil || r.ID == nil {
+ return 0
+ }
+ return *r.ID
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (r *RepoStatus) GetState() string {
+ if r == nil || r.State == nil {
+ return ""
+ }
+ return *r.State
+}
+
+// GetTargetURL returns the TargetURL field if it's non-nil, zero value otherwise.
+func (r *RepoStatus) GetTargetURL() string {
+ if r == nil || r.TargetURL == nil {
+ return ""
+ }
+ return *r.TargetURL
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (r *RepoStatus) GetUpdatedAt() time.Time {
+ if r == nil || r.UpdatedAt == nil {
+ return time.Time{}
+ }
+ return *r.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (r *RepoStatus) GetURL() string {
+ if r == nil || r.URL == nil {
+ return ""
+ }
+ return *r.URL
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (s *ServiceHook) GetName() string {
+ if s == nil || s.Name == nil {
+ return ""
+ }
+ return *s.Name
+}
+
+// GetPayload returns the Payload field if it's non-nil, zero value otherwise.
+func (s *SignatureVerification) GetPayload() string {
+ if s == nil || s.Payload == nil {
+ return ""
+ }
+ return *s.Payload
+}
+
+// GetReason returns the Reason field if it's non-nil, zero value otherwise.
+func (s *SignatureVerification) GetReason() string {
+ if s == nil || s.Reason == nil {
+ return ""
+ }
+ return *s.Reason
+}
+
+// GetSignature returns the Signature field if it's non-nil, zero value otherwise.
+func (s *SignatureVerification) GetSignature() string {
+ if s == nil || s.Signature == nil {
+ return ""
+ }
+ return *s.Signature
+}
+
+// GetVerified returns the Verified field if it's non-nil, zero value otherwise.
+func (s *SignatureVerification) GetVerified() bool {
+ if s == nil || s.Verified == nil {
+ return false
+ }
+ return *s.Verified
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (s *Source) GetID() int {
+ if s == nil || s.ID == nil {
+ return 0
+ }
+ return *s.ID
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (s *Source) GetURL() string {
+ if s == nil || s.URL == nil {
+ return ""
+ }
+ return *s.URL
+}
+
+// GetEmail returns the Email field if it's non-nil, zero value otherwise.
+func (s *SourceImportAuthor) GetEmail() string {
+ if s == nil || s.Email == nil {
+ return ""
+ }
+ return *s.Email
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (s *SourceImportAuthor) GetID() int {
+ if s == nil || s.ID == nil {
+ return 0
+ }
+ return *s.ID
+}
+
+// GetImportURL returns the ImportURL field if it's non-nil, zero value otherwise.
+func (s *SourceImportAuthor) GetImportURL() string {
+ if s == nil || s.ImportURL == nil {
+ return ""
+ }
+ return *s.ImportURL
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (s *SourceImportAuthor) GetName() string {
+ if s == nil || s.Name == nil {
+ return ""
+ }
+ return *s.Name
+}
+
+// GetRemoteID returns the RemoteID field if it's non-nil, zero value otherwise.
+func (s *SourceImportAuthor) GetRemoteID() string {
+ if s == nil || s.RemoteID == nil {
+ return ""
+ }
+ return *s.RemoteID
+}
+
+// GetRemoteName returns the RemoteName field if it's non-nil, zero value otherwise.
+func (s *SourceImportAuthor) GetRemoteName() string {
+ if s == nil || s.RemoteName == nil {
+ return ""
+ }
+ return *s.RemoteName
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (s *SourceImportAuthor) GetURL() string {
+ if s == nil || s.URL == nil {
+ return ""
+ }
+ return *s.URL
+}
+
+// GetStarredAt returns the StarredAt field if it's non-nil, zero value otherwise.
+func (s *Stargazer) GetStarredAt() Timestamp {
+ if s == nil || s.StarredAt == nil {
+ return Timestamp{}
+ }
+ return *s.StarredAt
+}
+
+// GetStarredAt returns the StarredAt field if it's non-nil, zero value otherwise.
+func (s *StarredRepository) GetStarredAt() Timestamp {
+ if s == nil || s.StarredAt == nil {
+ return Timestamp{}
+ }
+ return *s.StarredAt
+}
+
+// GetExcludeAttachments returns the ExcludeAttachments field if it's non-nil, zero value otherwise.
+func (s *startMigration) GetExcludeAttachments() bool {
+ if s == nil || s.ExcludeAttachments == nil {
+ return false
+ }
+ return *s.ExcludeAttachments
+}
+
+// GetLockRepositories returns the LockRepositories field if it's non-nil, zero value otherwise.
+func (s *startMigration) GetLockRepositories() bool {
+ if s == nil || s.LockRepositories == nil {
+ return false
+ }
+ return *s.LockRepositories
+}
+
+// GetContext returns the Context field if it's non-nil, zero value otherwise.
+func (s *StatusEvent) GetContext() string {
+ if s == nil || s.Context == nil {
+ return ""
+ }
+ return *s.Context
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (s *StatusEvent) GetCreatedAt() Timestamp {
+ if s == nil || s.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *s.CreatedAt
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (s *StatusEvent) GetDescription() string {
+ if s == nil || s.Description == nil {
+ return ""
+ }
+ return *s.Description
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (s *StatusEvent) GetID() int {
+ if s == nil || s.ID == nil {
+ return 0
+ }
+ return *s.ID
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (s *StatusEvent) GetName() string {
+ if s == nil || s.Name == nil {
+ return ""
+ }
+ return *s.Name
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (s *StatusEvent) GetSHA() string {
+ if s == nil || s.SHA == nil {
+ return ""
+ }
+ return *s.SHA
+}
+
+// GetState returns the State field if it's non-nil, zero value otherwise.
+func (s *StatusEvent) GetState() string {
+ if s == nil || s.State == nil {
+ return ""
+ }
+ return *s.State
+}
+
+// GetTargetURL returns the TargetURL field if it's non-nil, zero value otherwise.
+func (s *StatusEvent) GetTargetURL() string {
+ if s == nil || s.TargetURL == nil {
+ return ""
+ }
+ return *s.TargetURL
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (s *StatusEvent) GetUpdatedAt() Timestamp {
+ if s == nil || s.UpdatedAt == nil {
+ return Timestamp{}
+ }
+ return *s.UpdatedAt
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (s *Subscription) GetCreatedAt() Timestamp {
+ if s == nil || s.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *s.CreatedAt
+}
+
+// GetIgnored returns the Ignored field if it's non-nil, zero value otherwise.
+func (s *Subscription) GetIgnored() bool {
+ if s == nil || s.Ignored == nil {
+ return false
+ }
+ return *s.Ignored
+}
+
+// GetReason returns the Reason field if it's non-nil, zero value otherwise.
+func (s *Subscription) GetReason() string {
+ if s == nil || s.Reason == nil {
+ return ""
+ }
+ return *s.Reason
+}
+
+// GetRepositoryURL returns the RepositoryURL field if it's non-nil, zero value otherwise.
+func (s *Subscription) GetRepositoryURL() string {
+ if s == nil || s.RepositoryURL == nil {
+ return ""
+ }
+ return *s.RepositoryURL
+}
+
+// GetSubscribed returns the Subscribed field if it's non-nil, zero value otherwise.
+func (s *Subscription) GetSubscribed() bool {
+ if s == nil || s.Subscribed == nil {
+ return false
+ }
+ return *s.Subscribed
+}
+
+// GetThreadURL returns the ThreadURL field if it's non-nil, zero value otherwise.
+func (s *Subscription) GetThreadURL() string {
+ if s == nil || s.ThreadURL == nil {
+ return ""
+ }
+ return *s.ThreadURL
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (s *Subscription) GetURL() string {
+ if s == nil || s.URL == nil {
+ return ""
+ }
+ return *s.URL
+}
+
+// GetMessage returns the Message field if it's non-nil, zero value otherwise.
+func (t *Tag) GetMessage() string {
+ if t == nil || t.Message == nil {
+ return ""
+ }
+ return *t.Message
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (t *Tag) GetSHA() string {
+ if t == nil || t.SHA == nil {
+ return ""
+ }
+ return *t.SHA
+}
+
+// GetTag returns the Tag field if it's non-nil, zero value otherwise.
+func (t *Tag) GetTag() string {
+ if t == nil || t.Tag == nil {
+ return ""
+ }
+ return *t.Tag
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (t *Tag) GetURL() string {
+ if t == nil || t.URL == nil {
+ return ""
+ }
+ return *t.URL
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (t *Team) GetDescription() string {
+ if t == nil || t.Description == nil {
+ return ""
+ }
+ return *t.Description
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (t *Team) GetID() int {
+ if t == nil || t.ID == nil {
+ return 0
+ }
+ return *t.ID
+}
+
+// GetLDAPDN returns the LDAPDN field if it's non-nil, zero value otherwise.
+func (t *Team) GetLDAPDN() string {
+ if t == nil || t.LDAPDN == nil {
+ return ""
+ }
+ return *t.LDAPDN
+}
+
+// GetMembersCount returns the MembersCount field if it's non-nil, zero value otherwise.
+func (t *Team) GetMembersCount() int {
+ if t == nil || t.MembersCount == nil {
+ return 0
+ }
+ return *t.MembersCount
+}
+
+// GetMembersURL returns the MembersURL field if it's non-nil, zero value otherwise.
+func (t *Team) GetMembersURL() string {
+ if t == nil || t.MembersURL == nil {
+ return ""
+ }
+ return *t.MembersURL
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (t *Team) GetName() string {
+ if t == nil || t.Name == nil {
+ return ""
+ }
+ return *t.Name
+}
+
+// GetPermission returns the Permission field if it's non-nil, zero value otherwise.
+func (t *Team) GetPermission() string {
+ if t == nil || t.Permission == nil {
+ return ""
+ }
+ return *t.Permission
+}
+
+// GetPrivacy returns the Privacy field if it's non-nil, zero value otherwise.
+func (t *Team) GetPrivacy() string {
+ if t == nil || t.Privacy == nil {
+ return ""
+ }
+ return *t.Privacy
+}
+
+// GetReposCount returns the ReposCount field if it's non-nil, zero value otherwise.
+func (t *Team) GetReposCount() int {
+ if t == nil || t.ReposCount == nil {
+ return 0
+ }
+ return *t.ReposCount
+}
+
+// GetRepositoriesURL returns the RepositoriesURL field if it's non-nil, zero value otherwise.
+func (t *Team) GetRepositoriesURL() string {
+ if t == nil || t.RepositoriesURL == nil {
+ return ""
+ }
+ return *t.RepositoriesURL
+}
+
+// GetSlug returns the Slug field if it's non-nil, zero value otherwise.
+func (t *Team) GetSlug() string {
+ if t == nil || t.Slug == nil {
+ return ""
+ }
+ return *t.Slug
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (t *Team) GetURL() string {
+ if t == nil || t.URL == nil {
+ return ""
+ }
+ return *t.URL
+}
+
+// GetDescription returns the Description field if it's non-nil, zero value otherwise.
+func (t *TeamLDAPMapping) GetDescription() string {
+ if t == nil || t.Description == nil {
+ return ""
+ }
+ return *t.Description
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (t *TeamLDAPMapping) GetID() int {
+ if t == nil || t.ID == nil {
+ return 0
+ }
+ return *t.ID
+}
+
+// GetLDAPDN returns the LDAPDN field if it's non-nil, zero value otherwise.
+func (t *TeamLDAPMapping) GetLDAPDN() string {
+ if t == nil || t.LDAPDN == nil {
+ return ""
+ }
+ return *t.LDAPDN
+}
+
+// GetMembersURL returns the MembersURL field if it's non-nil, zero value otherwise.
+func (t *TeamLDAPMapping) GetMembersURL() string {
+ if t == nil || t.MembersURL == nil {
+ return ""
+ }
+ return *t.MembersURL
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (t *TeamLDAPMapping) GetName() string {
+ if t == nil || t.Name == nil {
+ return ""
+ }
+ return *t.Name
+}
+
+// GetPermission returns the Permission field if it's non-nil, zero value otherwise.
+func (t *TeamLDAPMapping) GetPermission() string {
+ if t == nil || t.Permission == nil {
+ return ""
+ }
+ return *t.Permission
+}
+
+// GetPrivacy returns the Privacy field if it's non-nil, zero value otherwise.
+func (t *TeamLDAPMapping) GetPrivacy() string {
+ if t == nil || t.Privacy == nil {
+ return ""
+ }
+ return *t.Privacy
+}
+
+// GetRepositoriesURL returns the RepositoriesURL field if it's non-nil, zero value otherwise.
+func (t *TeamLDAPMapping) GetRepositoriesURL() string {
+ if t == nil || t.RepositoriesURL == nil {
+ return ""
+ }
+ return *t.RepositoriesURL
+}
+
+// GetSlug returns the Slug field if it's non-nil, zero value otherwise.
+func (t *TeamLDAPMapping) GetSlug() string {
+ if t == nil || t.Slug == nil {
+ return ""
+ }
+ return *t.Slug
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (t *TeamLDAPMapping) GetURL() string {
+ if t == nil || t.URL == nil {
+ return ""
+ }
+ return *t.URL
+}
+
+// GetFragment returns the Fragment field if it's non-nil, zero value otherwise.
+func (t *TextMatch) GetFragment() string {
+ if t == nil || t.Fragment == nil {
+ return ""
+ }
+ return *t.Fragment
+}
+
+// GetObjectType returns the ObjectType field if it's non-nil, zero value otherwise.
+func (t *TextMatch) GetObjectType() string {
+ if t == nil || t.ObjectType == nil {
+ return ""
+ }
+ return *t.ObjectType
+}
+
+// GetObjectURL returns the ObjectURL field if it's non-nil, zero value otherwise.
+func (t *TextMatch) GetObjectURL() string {
+ if t == nil || t.ObjectURL == nil {
+ return ""
+ }
+ return *t.ObjectURL
+}
+
+// GetProperty returns the Property field if it's non-nil, zero value otherwise.
+func (t *TextMatch) GetProperty() string {
+ if t == nil || t.Property == nil {
+ return ""
+ }
+ return *t.Property
+}
+
+// GetCommitID returns the CommitID field if it's non-nil, zero value otherwise.
+func (t *Timeline) GetCommitID() string {
+ if t == nil || t.CommitID == nil {
+ return ""
+ }
+ return *t.CommitID
+}
+
+// GetCommitURL returns the CommitURL field if it's non-nil, zero value otherwise.
+func (t *Timeline) GetCommitURL() string {
+ if t == nil || t.CommitURL == nil {
+ return ""
+ }
+ return *t.CommitURL
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (t *Timeline) GetCreatedAt() time.Time {
+ if t == nil || t.CreatedAt == nil {
+ return time.Time{}
+ }
+ return *t.CreatedAt
+}
+
+// GetEvent returns the Event field if it's non-nil, zero value otherwise.
+func (t *Timeline) GetEvent() string {
+ if t == nil || t.Event == nil {
+ return ""
+ }
+ return *t.Event
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (t *Timeline) GetID() int {
+ if t == nil || t.ID == nil {
+ return 0
+ }
+ return *t.ID
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (t *Timeline) GetURL() string {
+ if t == nil || t.URL == nil {
+ return ""
+ }
+ return *t.URL
+}
+
+// GetCount returns the Count field if it's non-nil, zero value otherwise.
+func (t *TrafficClones) GetCount() int {
+ if t == nil || t.Count == nil {
+ return 0
+ }
+ return *t.Count
+}
+
+// GetUniques returns the Uniques field if it's non-nil, zero value otherwise.
+func (t *TrafficClones) GetUniques() int {
+ if t == nil || t.Uniques == nil {
+ return 0
+ }
+ return *t.Uniques
+}
+
+// GetCount returns the Count field if it's non-nil, zero value otherwise.
+func (t *TrafficData) GetCount() int {
+ if t == nil || t.Count == nil {
+ return 0
+ }
+ return *t.Count
+}
+
+// GetTimestamp returns the Timestamp field if it's non-nil, zero value otherwise.
+func (t *TrafficData) GetTimestamp() Timestamp {
+ if t == nil || t.Timestamp == nil {
+ return Timestamp{}
+ }
+ return *t.Timestamp
+}
+
+// GetUniques returns the Uniques field if it's non-nil, zero value otherwise.
+func (t *TrafficData) GetUniques() int {
+ if t == nil || t.Uniques == nil {
+ return 0
+ }
+ return *t.Uniques
+}
+
+// GetCount returns the Count field if it's non-nil, zero value otherwise.
+func (t *TrafficPath) GetCount() int {
+ if t == nil || t.Count == nil {
+ return 0
+ }
+ return *t.Count
+}
+
+// GetPath returns the Path field if it's non-nil, zero value otherwise.
+func (t *TrafficPath) GetPath() string {
+ if t == nil || t.Path == nil {
+ return ""
+ }
+ return *t.Path
+}
+
+// GetTitle returns the Title field if it's non-nil, zero value otherwise.
+func (t *TrafficPath) GetTitle() string {
+ if t == nil || t.Title == nil {
+ return ""
+ }
+ return *t.Title
+}
+
+// GetUniques returns the Uniques field if it's non-nil, zero value otherwise.
+func (t *TrafficPath) GetUniques() int {
+ if t == nil || t.Uniques == nil {
+ return 0
+ }
+ return *t.Uniques
+}
+
+// GetCount returns the Count field if it's non-nil, zero value otherwise.
+func (t *TrafficReferrer) GetCount() int {
+ if t == nil || t.Count == nil {
+ return 0
+ }
+ return *t.Count
+}
+
+// GetReferrer returns the Referrer field if it's non-nil, zero value otherwise.
+func (t *TrafficReferrer) GetReferrer() string {
+ if t == nil || t.Referrer == nil {
+ return ""
+ }
+ return *t.Referrer
+}
+
+// GetUniques returns the Uniques field if it's non-nil, zero value otherwise.
+func (t *TrafficReferrer) GetUniques() int {
+ if t == nil || t.Uniques == nil {
+ return 0
+ }
+ return *t.Uniques
+}
+
+// GetCount returns the Count field if it's non-nil, zero value otherwise.
+func (t *TrafficViews) GetCount() int {
+ if t == nil || t.Count == nil {
+ return 0
+ }
+ return *t.Count
+}
+
+// GetUniques returns the Uniques field if it's non-nil, zero value otherwise.
+func (t *TrafficViews) GetUniques() int {
+ if t == nil || t.Uniques == nil {
+ return 0
+ }
+ return *t.Uniques
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (t *Tree) GetSHA() string {
+ if t == nil || t.SHA == nil {
+ return ""
+ }
+ return *t.SHA
+}
+
+// GetContent returns the Content field if it's non-nil, zero value otherwise.
+func (t *TreeEntry) GetContent() string {
+ if t == nil || t.Content == nil {
+ return ""
+ }
+ return *t.Content
+}
+
+// GetMode returns the Mode field if it's non-nil, zero value otherwise.
+func (t *TreeEntry) GetMode() string {
+ if t == nil || t.Mode == nil {
+ return ""
+ }
+ return *t.Mode
+}
+
+// GetPath returns the Path field if it's non-nil, zero value otherwise.
+func (t *TreeEntry) GetPath() string {
+ if t == nil || t.Path == nil {
+ return ""
+ }
+ return *t.Path
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (t *TreeEntry) GetSHA() string {
+ if t == nil || t.SHA == nil {
+ return ""
+ }
+ return *t.SHA
+}
+
+// GetSize returns the Size field if it's non-nil, zero value otherwise.
+func (t *TreeEntry) GetSize() int {
+ if t == nil || t.Size == nil {
+ return 0
+ }
+ return *t.Size
+}
+
+// GetType returns the Type field if it's non-nil, zero value otherwise.
+func (t *TreeEntry) GetType() string {
+ if t == nil || t.Type == nil {
+ return ""
+ }
+ return *t.Type
+}
+
+// GetForce returns the Force field if it's non-nil, zero value otherwise.
+func (u *updateRefRequest) GetForce() bool {
+ if u == nil || u.Force == nil {
+ return false
+ }
+ return *u.Force
+}
+
+// GetSHA returns the SHA field if it's non-nil, zero value otherwise.
+func (u *updateRefRequest) GetSHA() string {
+ if u == nil || u.SHA == nil {
+ return ""
+ }
+ return *u.SHA
+}
+
+// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise.
+func (u *User) GetAvatarURL() string {
+ if u == nil || u.AvatarURL == nil {
+ return ""
+ }
+ return *u.AvatarURL
+}
+
+// GetBio returns the Bio field if it's non-nil, zero value otherwise.
+func (u *User) GetBio() string {
+ if u == nil || u.Bio == nil {
+ return ""
+ }
+ return *u.Bio
+}
+
+// GetBlog returns the Blog field if it's non-nil, zero value otherwise.
+func (u *User) GetBlog() string {
+ if u == nil || u.Blog == nil {
+ return ""
+ }
+ return *u.Blog
+}
+
+// GetCollaborators returns the Collaborators field if it's non-nil, zero value otherwise.
+func (u *User) GetCollaborators() int {
+ if u == nil || u.Collaborators == nil {
+ return 0
+ }
+ return *u.Collaborators
+}
+
+// GetCompany returns the Company field if it's non-nil, zero value otherwise.
+func (u *User) GetCompany() string {
+ if u == nil || u.Company == nil {
+ return ""
+ }
+ return *u.Company
+}
+
+// GetCreatedAt returns the CreatedAt field if it's non-nil, zero value otherwise.
+func (u *User) GetCreatedAt() Timestamp {
+ if u == nil || u.CreatedAt == nil {
+ return Timestamp{}
+ }
+ return *u.CreatedAt
+}
+
+// GetDiskUsage returns the DiskUsage field if it's non-nil, zero value otherwise.
+func (u *User) GetDiskUsage() int {
+ if u == nil || u.DiskUsage == nil {
+ return 0
+ }
+ return *u.DiskUsage
+}
+
+// GetEmail returns the Email field if it's non-nil, zero value otherwise.
+func (u *User) GetEmail() string {
+ if u == nil || u.Email == nil {
+ return ""
+ }
+ return *u.Email
+}
+
+// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise.
+func (u *User) GetEventsURL() string {
+ if u == nil || u.EventsURL == nil {
+ return ""
+ }
+ return *u.EventsURL
+}
+
+// GetFollowers returns the Followers field if it's non-nil, zero value otherwise.
+func (u *User) GetFollowers() int {
+ if u == nil || u.Followers == nil {
+ return 0
+ }
+ return *u.Followers
+}
+
+// GetFollowersURL returns the FollowersURL field if it's non-nil, zero value otherwise.
+func (u *User) GetFollowersURL() string {
+ if u == nil || u.FollowersURL == nil {
+ return ""
+ }
+ return *u.FollowersURL
+}
+
+// GetFollowing returns the Following field if it's non-nil, zero value otherwise.
+func (u *User) GetFollowing() int {
+ if u == nil || u.Following == nil {
+ return 0
+ }
+ return *u.Following
+}
+
+// GetFollowingURL returns the FollowingURL field if it's non-nil, zero value otherwise.
+func (u *User) GetFollowingURL() string {
+ if u == nil || u.FollowingURL == nil {
+ return ""
+ }
+ return *u.FollowingURL
+}
+
+// GetGistsURL returns the GistsURL field if it's non-nil, zero value otherwise.
+func (u *User) GetGistsURL() string {
+ if u == nil || u.GistsURL == nil {
+ return ""
+ }
+ return *u.GistsURL
+}
+
+// GetGravatarID returns the GravatarID field if it's non-nil, zero value otherwise.
+func (u *User) GetGravatarID() string {
+ if u == nil || u.GravatarID == nil {
+ return ""
+ }
+ return *u.GravatarID
+}
+
+// GetHireable returns the Hireable field if it's non-nil, zero value otherwise.
+func (u *User) GetHireable() bool {
+ if u == nil || u.Hireable == nil {
+ return false
+ }
+ return *u.Hireable
+}
+
+// GetHTMLURL returns the HTMLURL field if it's non-nil, zero value otherwise.
+func (u *User) GetHTMLURL() string {
+ if u == nil || u.HTMLURL == nil {
+ return ""
+ }
+ return *u.HTMLURL
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (u *User) GetID() int {
+ if u == nil || u.ID == nil {
+ return 0
+ }
+ return *u.ID
+}
+
+// GetLocation returns the Location field if it's non-nil, zero value otherwise.
+func (u *User) GetLocation() string {
+ if u == nil || u.Location == nil {
+ return ""
+ }
+ return *u.Location
+}
+
+// GetLogin returns the Login field if it's non-nil, zero value otherwise.
+func (u *User) GetLogin() string {
+ if u == nil || u.Login == nil {
+ return ""
+ }
+ return *u.Login
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (u *User) GetName() string {
+ if u == nil || u.Name == nil {
+ return ""
+ }
+ return *u.Name
+}
+
+// GetOrganizationsURL returns the OrganizationsURL field if it's non-nil, zero value otherwise.
+func (u *User) GetOrganizationsURL() string {
+ if u == nil || u.OrganizationsURL == nil {
+ return ""
+ }
+ return *u.OrganizationsURL
+}
+
+// GetOwnedPrivateRepos returns the OwnedPrivateRepos field if it's non-nil, zero value otherwise.
+func (u *User) GetOwnedPrivateRepos() int {
+ if u == nil || u.OwnedPrivateRepos == nil {
+ return 0
+ }
+ return *u.OwnedPrivateRepos
+}
+
+// GetPermissions returns the Permissions field if it's non-nil, zero value otherwise.
+func (u *User) GetPermissions() map[string]bool {
+ if u == nil || u.Permissions == nil {
+ return map[string]bool{}
+ }
+ return *u.Permissions
+}
+
+// GetPrivateGists returns the PrivateGists field if it's non-nil, zero value otherwise.
+func (u *User) GetPrivateGists() int {
+ if u == nil || u.PrivateGists == nil {
+ return 0
+ }
+ return *u.PrivateGists
+}
+
+// GetPublicGists returns the PublicGists field if it's non-nil, zero value otherwise.
+func (u *User) GetPublicGists() int {
+ if u == nil || u.PublicGists == nil {
+ return 0
+ }
+ return *u.PublicGists
+}
+
+// GetPublicRepos returns the PublicRepos field if it's non-nil, zero value otherwise.
+func (u *User) GetPublicRepos() int {
+ if u == nil || u.PublicRepos == nil {
+ return 0
+ }
+ return *u.PublicRepos
+}
+
+// GetReceivedEventsURL returns the ReceivedEventsURL field if it's non-nil, zero value otherwise.
+func (u *User) GetReceivedEventsURL() string {
+ if u == nil || u.ReceivedEventsURL == nil {
+ return ""
+ }
+ return *u.ReceivedEventsURL
+}
+
+// GetReposURL returns the ReposURL field if it's non-nil, zero value otherwise.
+func (u *User) GetReposURL() string {
+ if u == nil || u.ReposURL == nil {
+ return ""
+ }
+ return *u.ReposURL
+}
+
+// GetSiteAdmin returns the SiteAdmin field if it's non-nil, zero value otherwise.
+func (u *User) GetSiteAdmin() bool {
+ if u == nil || u.SiteAdmin == nil {
+ return false
+ }
+ return *u.SiteAdmin
+}
+
+// GetStarredURL returns the StarredURL field if it's non-nil, zero value otherwise.
+func (u *User) GetStarredURL() string {
+ if u == nil || u.StarredURL == nil {
+ return ""
+ }
+ return *u.StarredURL
+}
+
+// GetSubscriptionsURL returns the SubscriptionsURL field if it's non-nil, zero value otherwise.
+func (u *User) GetSubscriptionsURL() string {
+ if u == nil || u.SubscriptionsURL == nil {
+ return ""
+ }
+ return *u.SubscriptionsURL
+}
+
+// GetSuspendedAt returns the SuspendedAt field if it's non-nil, zero value otherwise.
+func (u *User) GetSuspendedAt() Timestamp {
+ if u == nil || u.SuspendedAt == nil {
+ return Timestamp{}
+ }
+ return *u.SuspendedAt
+}
+
+// GetTotalPrivateRepos returns the TotalPrivateRepos field if it's non-nil, zero value otherwise.
+func (u *User) GetTotalPrivateRepos() int {
+ if u == nil || u.TotalPrivateRepos == nil {
+ return 0
+ }
+ return *u.TotalPrivateRepos
+}
+
+// GetType returns the Type field if it's non-nil, zero value otherwise.
+func (u *User) GetType() string {
+ if u == nil || u.Type == nil {
+ return ""
+ }
+ return *u.Type
+}
+
+// GetUpdatedAt returns the UpdatedAt field if it's non-nil, zero value otherwise.
+func (u *User) GetUpdatedAt() Timestamp {
+ if u == nil || u.UpdatedAt == nil {
+ return Timestamp{}
+ }
+ return *u.UpdatedAt
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (u *User) GetURL() string {
+ if u == nil || u.URL == nil {
+ return ""
+ }
+ return *u.URL
+}
+
+// GetEmail returns the Email field if it's non-nil, zero value otherwise.
+func (u *UserEmail) GetEmail() string {
+ if u == nil || u.Email == nil {
+ return ""
+ }
+ return *u.Email
+}
+
+// GetPrimary returns the Primary field if it's non-nil, zero value otherwise.
+func (u *UserEmail) GetPrimary() bool {
+ if u == nil || u.Primary == nil {
+ return false
+ }
+ return *u.Primary
+}
+
+// GetVerified returns the Verified field if it's non-nil, zero value otherwise.
+func (u *UserEmail) GetVerified() bool {
+ if u == nil || u.Verified == nil {
+ return false
+ }
+ return *u.Verified
+}
+
+// GetAvatarURL returns the AvatarURL field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetAvatarURL() string {
+ if u == nil || u.AvatarURL == nil {
+ return ""
+ }
+ return *u.AvatarURL
+}
+
+// GetEventsURL returns the EventsURL field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetEventsURL() string {
+ if u == nil || u.EventsURL == nil {
+ return ""
+ }
+ return *u.EventsURL
+}
+
+// GetFollowersURL returns the FollowersURL field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetFollowersURL() string {
+ if u == nil || u.FollowersURL == nil {
+ return ""
+ }
+ return *u.FollowersURL
+}
+
+// GetFollowingURL returns the FollowingURL field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetFollowingURL() string {
+ if u == nil || u.FollowingURL == nil {
+ return ""
+ }
+ return *u.FollowingURL
+}
+
+// GetGistsURL returns the GistsURL field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetGistsURL() string {
+ if u == nil || u.GistsURL == nil {
+ return ""
+ }
+ return *u.GistsURL
+}
+
+// GetGravatarID returns the GravatarID field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetGravatarID() string {
+ if u == nil || u.GravatarID == nil {
+ return ""
+ }
+ return *u.GravatarID
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetID() int {
+ if u == nil || u.ID == nil {
+ return 0
+ }
+ return *u.ID
+}
+
+// GetLDAPDN returns the LDAPDN field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetLDAPDN() string {
+ if u == nil || u.LDAPDN == nil {
+ return ""
+ }
+ return *u.LDAPDN
+}
+
+// GetLogin returns the Login field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetLogin() string {
+ if u == nil || u.Login == nil {
+ return ""
+ }
+ return *u.Login
+}
+
+// GetOrganizationsURL returns the OrganizationsURL field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetOrganizationsURL() string {
+ if u == nil || u.OrganizationsURL == nil {
+ return ""
+ }
+ return *u.OrganizationsURL
+}
+
+// GetReceivedEventsURL returns the ReceivedEventsURL field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetReceivedEventsURL() string {
+ if u == nil || u.ReceivedEventsURL == nil {
+ return ""
+ }
+ return *u.ReceivedEventsURL
+}
+
+// GetReposURL returns the ReposURL field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetReposURL() string {
+ if u == nil || u.ReposURL == nil {
+ return ""
+ }
+ return *u.ReposURL
+}
+
+// GetSiteAdmin returns the SiteAdmin field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetSiteAdmin() bool {
+ if u == nil || u.SiteAdmin == nil {
+ return false
+ }
+ return *u.SiteAdmin
+}
+
+// GetStarredURL returns the StarredURL field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetStarredURL() string {
+ if u == nil || u.StarredURL == nil {
+ return ""
+ }
+ return *u.StarredURL
+}
+
+// GetSubscriptionsURL returns the SubscriptionsURL field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetSubscriptionsURL() string {
+ if u == nil || u.SubscriptionsURL == nil {
+ return ""
+ }
+ return *u.SubscriptionsURL
+}
+
+// GetType returns the Type field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetType() string {
+ if u == nil || u.Type == nil {
+ return ""
+ }
+ return *u.Type
+}
+
+// GetURL returns the URL field if it's non-nil, zero value otherwise.
+func (u *UserLDAPMapping) GetURL() string {
+ if u == nil || u.URL == nil {
+ return ""
+ }
+ return *u.URL
+}
+
+// GetIncompleteResults returns the IncompleteResults field if it's non-nil, zero value otherwise.
+func (u *UsersSearchResult) GetIncompleteResults() bool {
+ if u == nil || u.IncompleteResults == nil {
+ return false
+ }
+ return *u.IncompleteResults
+}
+
+// GetTotal returns the Total field if it's non-nil, zero value otherwise.
+func (u *UsersSearchResult) GetTotal() int {
+ if u == nil || u.Total == nil {
+ return 0
+ }
+ return *u.Total
+}
+
+// GetAction returns the Action field if it's non-nil, zero value otherwise.
+func (w *WatchEvent) GetAction() string {
+ if w == nil || w.Action == nil {
+ return ""
+ }
+ return *w.Action
+}
+
+// GetEmail returns the Email field if it's non-nil, zero value otherwise.
+func (w *WebHookAuthor) GetEmail() string {
+ if w == nil || w.Email == nil {
+ return ""
+ }
+ return *w.Email
+}
+
+// GetName returns the Name field if it's non-nil, zero value otherwise.
+func (w *WebHookAuthor) GetName() string {
+ if w == nil || w.Name == nil {
+ return ""
+ }
+ return *w.Name
+}
+
+// GetUsername returns the Username field if it's non-nil, zero value otherwise.
+func (w *WebHookAuthor) GetUsername() string {
+ if w == nil || w.Username == nil {
+ return ""
+ }
+ return *w.Username
+}
+
+// GetDistinct returns the Distinct field if it's non-nil, zero value otherwise.
+func (w *WebHookCommit) GetDistinct() bool {
+ if w == nil || w.Distinct == nil {
+ return false
+ }
+ return *w.Distinct
+}
+
+// GetID returns the ID field if it's non-nil, zero value otherwise.
+func (w *WebHookCommit) GetID() string {
+ if w == nil || w.ID == nil {
+ return ""
+ }
+ return *w.ID
+}
+
+// GetMessage returns the Message field if it's non-nil, zero value otherwise.
+func (w *WebHookCommit) GetMessage() string {
+ if w == nil || w.Message == nil {
+ return ""
+ }
+ return *w.Message
+}
+
+// GetTimestamp returns the Timestamp field if it's non-nil, zero value otherwise.
+func (w *WebHookCommit) GetTimestamp() time.Time {
+ if w == nil || w.Timestamp == nil {
+ return time.Time{}
+ }
+ return *w.Timestamp
+}
+
+// GetAfter returns the After field if it's non-nil, zero value otherwise.
+func (w *WebHookPayload) GetAfter() string {
+ if w == nil || w.After == nil {
+ return ""
+ }
+ return *w.After
+}
+
+// GetBefore returns the Before field if it's non-nil, zero value otherwise.
+func (w *WebHookPayload) GetBefore() string {
+ if w == nil || w.Before == nil {
+ return ""
+ }
+ return *w.Before
+}
+
+// GetCompare returns the Compare field if it's non-nil, zero value otherwise.
+func (w *WebHookPayload) GetCompare() string {
+ if w == nil || w.Compare == nil {
+ return ""
+ }
+ return *w.Compare
+}
+
+// GetCreated returns the Created field if it's non-nil, zero value otherwise.
+func (w *WebHookPayload) GetCreated() bool {
+ if w == nil || w.Created == nil {
+ return false
+ }
+ return *w.Created
+}
+
+// GetDeleted returns the Deleted field if it's non-nil, zero value otherwise.
+func (w *WebHookPayload) GetDeleted() bool {
+ if w == nil || w.Deleted == nil {
+ return false
+ }
+ return *w.Deleted
+}
+
+// GetForced returns the Forced field if it's non-nil, zero value otherwise.
+func (w *WebHookPayload) GetForced() bool {
+ if w == nil || w.Forced == nil {
+ return false
+ }
+ return *w.Forced
+}
+
+// GetRef returns the Ref field if it's non-nil, zero value otherwise.
+func (w *WebHookPayload) GetRef() string {
+ if w == nil || w.Ref == nil {
+ return ""
+ }
+ return *w.Ref
+}
+
+// GetTotal returns the Total field if it's non-nil, zero value otherwise.
+func (w *WeeklyCommitActivity) GetTotal() int {
+ if w == nil || w.Total == nil {
+ return 0
+ }
+ return *w.Total
+}
+
+// GetWeek returns the Week field if it's non-nil, zero value otherwise.
+func (w *WeeklyCommitActivity) GetWeek() Timestamp {
+ if w == nil || w.Week == nil {
+ return Timestamp{}
+ }
+ return *w.Week
+}
+
+// GetAdditions returns the Additions field if it's non-nil, zero value otherwise.
+func (w *WeeklyStats) GetAdditions() int {
+ if w == nil || w.Additions == nil {
+ return 0
+ }
+ return *w.Additions
+}
+
+// GetCommits returns the Commits field if it's non-nil, zero value otherwise.
+func (w *WeeklyStats) GetCommits() int {
+ if w == nil || w.Commits == nil {
+ return 0
+ }
+ return *w.Commits
+}
+
+// GetDeletions returns the Deletions field if it's non-nil, zero value otherwise.
+func (w *WeeklyStats) GetDeletions() int {
+ if w == nil || w.Deletions == nil {
+ return 0
+ }
+ return *w.Deletions
+}
+
+// GetWeek returns the Week field if it's non-nil, zero value otherwise.
+func (w *WeeklyStats) GetWeek() Timestamp {
+ if w == nil || w.Week == nil {
+ return Timestamp{}
+ }
+ return *w.Week
+}
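The accessors above are produced by gen-accessors.go (invoked via the go:generate directive added below) and return the field's zero value whenever the receiver or the field pointer is nil, so callers can read optional API fields without explicit nil checks. A minimal sketch of that behaviour, assuming the vendored go-github import path; the values are illustrative only, not part of this diff:

package main

import (
	"fmt"

	"github.com/google/go-github/github"
)

func main() {
	// Calling an accessor on a nil receiver is safe: it returns the zero value.
	var u *github.User
	fmt.Println(u.GetLogin()) // prints an empty string, no nil-pointer panic

	// A struct whose field pointer is unset (nil) behaves the same way.
	fmt.Println((&github.User{}).GetSiteAdmin()) // prints false
}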
diff --git a/vendor/github.com/google/go-github/github/github.go b/vendor/github.com/google/go-github/github/github.go
index 0f3145a..decdaaa 100644
--- a/vendor/github.com/google/go-github/github/github.go
+++ b/vendor/github.com/google/go-github/github/github.go
@@ -3,10 +3,13 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:generate go run gen-accessors.go
+
package github
import (
"bytes"
+ "context"
"encoding/json"
"errors"
"fmt"
@@ -24,7 +27,7 @@ import (
)
const (
- libraryVersion = "3"
+ libraryVersion = "6"
defaultBaseURL = "https://api.github.com/"
uploadBaseURL = "https://uploads.github.com/"
userAgent = "go-github/" + libraryVersion
@@ -86,14 +89,14 @@ const (
// https://developer.github.com/changes/2016-09-14-Integrations-Early-Access/
mediaTypeIntegrationPreview = "application/vnd.github.machine-man-preview+json"
- // https://developer.github.com/changes/2016-11-28-preview-org-membership/
- mediaTypeOrgMembershipPreview = "application/vnd.github.korra-preview+json"
-
// https://developer.github.com/changes/2017-01-05-commit-search-api/
mediaTypeCommitSearchPreview = "application/vnd.github.cloak-preview+json"
// https://developer.github.com/changes/2016-12-14-reviews-api/
mediaTypePullRequestReviewsPreview = "application/vnd.github.black-cat-preview+json"
+
+ // https://developer.github.com/changes/2017-02-28-user-blocking-apis-and-webhook/
+ mediaTypeBlockUsersPreview = "application/vnd.github.giant-sentry-fist-preview+json"
)
// A Client manages communication with the GitHub API.
@@ -383,7 +386,12 @@ func parseRate(r *http.Response) Rate {
// interface, the raw response body will be written to v, without attempting to
// first decode it. If rate limit is exceeded and reset time is in the future,
// Do returns *RateLimitError immediately without making a network API call.
-func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
+//
+// The provided ctx must be non-nil. If it is canceled or times out,
+// ctx.Err() will be returned.
+func (c *Client) Do(ctx context.Context, req *http.Request, v interface{}) (*Response, error) {
+ ctx, req = withContext(ctx, req)
+
rateLimitCategory := category(req.URL.Path)
// If we've hit rate limit, don't make further requests before Reset time.
@@ -393,12 +401,22 @@ func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
resp, err := c.client.Do(req)
if err != nil {
+ // If we got an error, and the context has been canceled,
+ // the context's error is probably more useful.
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ // If the error type is *url.Error, sanitize its URL before returning.
if e, ok := err.(*url.Error); ok {
if url, err := url.Parse(e.URL); err == nil {
e.URL = sanitizeURL(url).String()
return nil, e
}
}
+
return nil, err
}
@@ -711,7 +729,7 @@ func category(path string) rateLimitCategory {
}
// RateLimits returns the rate limits for the current client.
-func (c *Client) RateLimits() (*RateLimits, *Response, error) {
+func (c *Client) RateLimits(ctx context.Context) (*RateLimits, *Response, error) {
req, err := c.NewRequest("GET", "rate_limit", nil)
if err != nil {
return nil, nil, err
@@ -720,7 +738,7 @@ func (c *Client) RateLimits() (*RateLimits, *Response, error) {
response := new(struct {
Resources *RateLimits `json:"resources"`
})
- resp, err := c.Do(req, response)
+ resp, err := c.Do(ctx, req, response)
if err != nil {
return nil, nil, err
}
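With this change every request flows through Do(ctx, req, v), so a caller-supplied context.Context governs cancellation and deadlines for the underlying HTTP call, and ctx.Err() is returned when the context ends before the request completes. A hedged sketch of the updated RateLimits call under those assumptions; the timeout and the printed field are illustrative:

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/google/go-github/github"
)

func main() {
	client := github.NewClient(nil)

	// The deadline propagates through Client.Do to the HTTP request; if it
	// fires first, ctx.Err() is what the call returns.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	limits, _, err := client.RateLimits(ctx)
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	if limits.Core != nil {
		fmt.Println("core remaining:", limits.Core.Remaining)
	}
}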
diff --git a/vendor/github.com/google/go-github/github/gitignore.go b/vendor/github.com/google/go-github/github/gitignore.go
index 396178d..2f691bc 100644
--- a/vendor/github.com/google/go-github/github/gitignore.go
+++ b/vendor/github.com/google/go-github/github/gitignore.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// GitignoresService provides access to the gitignore related functions in the
// GitHub API.
@@ -25,15 +28,15 @@ func (g Gitignore) String() string {
// List all available Gitignore templates.
//
-// https://developer.github.com/v3/gitignore/#listing-available-templates
-func (s GitignoresService) List() ([]string, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/gitignore/#listing-available-templates
+func (s GitignoresService) List(ctx context.Context) ([]string, *Response, error) {
req, err := s.client.NewRequest("GET", "gitignore/templates", nil)
if err != nil {
return nil, nil, err
}
var availableTemplates []string
- resp, err := s.client.Do(req, &availableTemplates)
+ resp, err := s.client.Do(ctx, req, &availableTemplates)
if err != nil {
return nil, resp, err
}
@@ -43,8 +46,8 @@ func (s GitignoresService) List() ([]string, *Response, error) {
// Get a Gitignore by name.
//
-// https://developer.github.com/v3/gitignore/#get-a-single-template
-func (s GitignoresService) Get(name string) (*Gitignore, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/gitignore/#get-a-single-template
+func (s GitignoresService) Get(ctx context.Context, name string) (*Gitignore, *Response, error) {
u := fmt.Sprintf("gitignore/templates/%v", name)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -52,7 +55,7 @@ func (s GitignoresService) Get(name string) (*Gitignore, *Response, error) {
}
gitignore := new(Gitignore)
- resp, err := s.client.Do(req, gitignore)
+ resp, err := s.client.Do(ctx, req, gitignore)
if err != nil {
return nil, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/integration.go b/vendor/github.com/google/go-github/github/integration.go
index 033ee44..6d74e44 100644
--- a/vendor/github.com/google/go-github/github/integration.go
+++ b/vendor/github.com/google/go-github/github/integration.go
@@ -5,6 +5,8 @@
package github
+import "context"
+
// IntegrationsService provides access to the installation related functions
// in the GitHub API.
//
@@ -14,7 +16,7 @@ type IntegrationsService service
// ListInstallations lists the installations that the current integration has.
//
// GitHub API docs: https://developer.github.com/v3/integrations/#find-installations
-func (s *IntegrationsService) ListInstallations(opt *ListOptions) ([]*Installation, *Response, error) {
+func (s *IntegrationsService) ListInstallations(ctx context.Context, opt *ListOptions) ([]*Installation, *Response, error) {
u, err := addOptions("integration/installations", opt)
if err != nil {
return nil, nil, err
@@ -29,7 +31,7 @@ func (s *IntegrationsService) ListInstallations(opt *ListOptions) ([]*Installati
req.Header.Set("Accept", mediaTypeIntegrationPreview)
var i []*Installation
- resp, err := s.client.Do(req, &i)
+ resp, err := s.client.Do(ctx, req, &i)
if err != nil {
return nil, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/integration_installation.go b/vendor/github.com/google/go-github/github/integration_installation.go
index b130470..0836d86 100644
--- a/vendor/github.com/google/go-github/github/integration_installation.go
+++ b/vendor/github.com/google/go-github/github/integration_installation.go
@@ -5,12 +5,15 @@
package github
+import "context"
+
// Installation represents a GitHub integration installation.
type Installation struct {
ID *int `json:"id,omitempty"`
Account *User `json:"account,omitempty"`
AccessTokensURL *string `json:"access_tokens_url,omitempty"`
RepositoriesURL *string `json:"repositories_url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
}
func (i Installation) String() string {
@@ -20,7 +23,7 @@ func (i Installation) String() string {
// ListRepos lists the repositories that the current installation has access to.
//
// GitHub API docs: https://developer.github.com/v3/integrations/installations/#list-repositories
-func (s *IntegrationsService) ListRepos(opt *ListOptions) ([]*Repository, *Response, error) {
+func (s *IntegrationsService) ListRepos(ctx context.Context, opt *ListOptions) ([]*Repository, *Response, error) {
u, err := addOptions("installation/repositories", opt)
if err != nil {
return nil, nil, err
@@ -37,7 +40,7 @@ func (s *IntegrationsService) ListRepos(opt *ListOptions) ([]*Repository, *Respo
var r struct {
Repositories []*Repository `json:"repositories"`
}
- resp, err := s.client.Do(req, &r)
+ resp, err := s.client.Do(ctx, req, &r)
if err != nil {
return nil, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/issues.go b/vendor/github.com/google/go-github/github/issues.go
index c1997ee..b437d50 100644
--- a/vendor/github.com/google/go-github/github/issues.go
+++ b/vendor/github.com/google/go-github/github/issues.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -26,6 +27,7 @@ type Issue struct {
ID *int `json:"id,omitempty"`
Number *int `json:"number,omitempty"`
State *string `json:"state,omitempty"`
+ Locked *bool `json:"locked,omitempty"`
Title *string `json:"title,omitempty"`
Body *string `json:"body,omitempty"`
User *User `json:"user,omitempty"`
@@ -35,6 +37,7 @@ type Issue struct {
ClosedAt *time.Time `json:"closed_at,omitempty"`
CreatedAt *time.Time `json:"created_at,omitempty"`
UpdatedAt *time.Time `json:"updated_at,omitempty"`
+ ClosedBy *User `json:"closed_by,omitempty"`
URL *string `json:"url,omitempty"`
HTMLURL *string `json:"html_url,omitempty"`
Milestone *Milestone `json:"milestone,omitempty"`
@@ -108,26 +111,26 @@ type PullRequestLinks struct {
// repositories.
//
// GitHub API docs: https://developer.github.com/v3/issues/#list-issues
-func (s *IssuesService) List(all bool, opt *IssueListOptions) ([]*Issue, *Response, error) {
+func (s *IssuesService) List(ctx context.Context, all bool, opt *IssueListOptions) ([]*Issue, *Response, error) {
var u string
if all {
u = "issues"
} else {
u = "user/issues"
}
- return s.listIssues(u, opt)
+ return s.listIssues(ctx, u, opt)
}
// ListByOrg fetches the issues in the specified organization for the
// authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/issues/#list-issues
-func (s *IssuesService) ListByOrg(org string, opt *IssueListOptions) ([]*Issue, *Response, error) {
+func (s *IssuesService) ListByOrg(ctx context.Context, org string, opt *IssueListOptions) ([]*Issue, *Response, error) {
u := fmt.Sprintf("orgs/%v/issues", org)
- return s.listIssues(u, opt)
+ return s.listIssues(ctx, u, opt)
}
-func (s *IssuesService) listIssues(u string, opt *IssueListOptions) ([]*Issue, *Response, error) {
+func (s *IssuesService) listIssues(ctx context.Context, u string, opt *IssueListOptions) ([]*Issue, *Response, error) {
u, err := addOptions(u, opt)
if err != nil {
return nil, nil, err
@@ -142,7 +145,7 @@ func (s *IssuesService) listIssues(u string, opt *IssueListOptions) ([]*Issue, *
req.Header.Set("Accept", mediaTypeReactionsPreview)
var issues []*Issue
- resp, err := s.client.Do(req, &issues)
+ resp, err := s.client.Do(ctx, req, &issues)
if err != nil {
return nil, resp, err
}
@@ -193,7 +196,7 @@ type IssueListByRepoOptions struct {
// ListByRepo lists the issues for the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/issues/#list-issues-for-a-repository
-func (s *IssuesService) ListByRepo(owner string, repo string, opt *IssueListByRepoOptions) ([]*Issue, *Response, error) {
+func (s *IssuesService) ListByRepo(ctx context.Context, owner string, repo string, opt *IssueListByRepoOptions) ([]*Issue, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -209,7 +212,7 @@ func (s *IssuesService) ListByRepo(owner string, repo string, opt *IssueListByRe
req.Header.Set("Accept", mediaTypeReactionsPreview)
var issues []*Issue
- resp, err := s.client.Do(req, &issues)
+ resp, err := s.client.Do(ctx, req, &issues)
if err != nil {
return nil, resp, err
}
@@ -220,7 +223,7 @@ func (s *IssuesService) ListByRepo(owner string, repo string, opt *IssueListByRe
// Get a single issue.
//
// GitHub API docs: https://developer.github.com/v3/issues/#get-a-single-issue
-func (s *IssuesService) Get(owner string, repo string, number int) (*Issue, *Response, error) {
+func (s *IssuesService) Get(ctx context.Context, owner string, repo string, number int) (*Issue, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%d", owner, repo, number)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -231,7 +234,7 @@ func (s *IssuesService) Get(owner string, repo string, number int) (*Issue, *Res
req.Header.Set("Accept", mediaTypeReactionsPreview)
issue := new(Issue)
- resp, err := s.client.Do(req, issue)
+ resp, err := s.client.Do(ctx, req, issue)
if err != nil {
return nil, resp, err
}
@@ -242,7 +245,7 @@ func (s *IssuesService) Get(owner string, repo string, number int) (*Issue, *Res
// Create a new issue on the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/issues/#create-an-issue
-func (s *IssuesService) Create(owner string, repo string, issue *IssueRequest) (*Issue, *Response, error) {
+func (s *IssuesService) Create(ctx context.Context, owner string, repo string, issue *IssueRequest) (*Issue, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues", owner, repo)
req, err := s.client.NewRequest("POST", u, issue)
if err != nil {
@@ -250,7 +253,7 @@ func (s *IssuesService) Create(owner string, repo string, issue *IssueRequest) (
}
i := new(Issue)
- resp, err := s.client.Do(req, i)
+ resp, err := s.client.Do(ctx, req, i)
if err != nil {
return nil, resp, err
}
@@ -261,7 +264,7 @@ func (s *IssuesService) Create(owner string, repo string, issue *IssueRequest) (
// Edit an issue.
//
// GitHub API docs: https://developer.github.com/v3/issues/#edit-an-issue
-func (s *IssuesService) Edit(owner string, repo string, number int, issue *IssueRequest) (*Issue, *Response, error) {
+func (s *IssuesService) Edit(ctx context.Context, owner string, repo string, number int, issue *IssueRequest) (*Issue, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%d", owner, repo, number)
req, err := s.client.NewRequest("PATCH", u, issue)
if err != nil {
@@ -269,7 +272,7 @@ func (s *IssuesService) Edit(owner string, repo string, number int, issue *Issue
}
i := new(Issue)
- resp, err := s.client.Do(req, i)
+ resp, err := s.client.Do(ctx, req, i)
if err != nil {
return nil, resp, err
}
@@ -280,25 +283,25 @@ func (s *IssuesService) Edit(owner string, repo string, number int, issue *Issue
// Lock an issue's conversation.
//
// GitHub API docs: https://developer.github.com/v3/issues/#lock-an-issue
-func (s *IssuesService) Lock(owner string, repo string, number int) (*Response, error) {
+func (s *IssuesService) Lock(ctx context.Context, owner string, repo string, number int) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%d/lock", owner, repo, number)
req, err := s.client.NewRequest("PUT", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// Unlock an issue's conversation.
//
// GitHub API docs: https://developer.github.com/v3/issues/#unlock-an-issue
-func (s *IssuesService) Unlock(owner string, repo string, number int) (*Response, error) {
+func (s *IssuesService) Unlock(ctx context.Context, owner string, repo string, number int) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%d/lock", owner, repo, number)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/issues_assignees.go b/vendor/github.com/google/go-github/github/issues_assignees.go
index bdc6a6b..9cb366f 100644
--- a/vendor/github.com/google/go-github/github/issues_assignees.go
+++ b/vendor/github.com/google/go-github/github/issues_assignees.go
@@ -5,13 +5,16 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// ListAssignees fetches all available assignees (owners and collaborators) to
// which issues may be assigned.
//
// GitHub API docs: https://developer.github.com/v3/issues/assignees/#list-assignees
-func (s *IssuesService) ListAssignees(owner, repo string, opt *ListOptions) ([]*User, *Response, error) {
+func (s *IssuesService) ListAssignees(ctx context.Context, owner, repo string, opt *ListOptions) ([]*User, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/assignees", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -23,7 +26,7 @@ func (s *IssuesService) ListAssignees(owner, repo string, opt *ListOptions) ([]*
return nil, nil, err
}
var assignees []*User
- resp, err := s.client.Do(req, &assignees)
+ resp, err := s.client.Do(ctx, req, &assignees)
if err != nil {
return nil, resp, err
}
@@ -34,13 +37,13 @@ func (s *IssuesService) ListAssignees(owner, repo string, opt *ListOptions) ([]*
// IsAssignee checks if a user is an assignee for the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/issues/assignees/#check-assignee
-func (s *IssuesService) IsAssignee(owner, repo, user string) (bool, *Response, error) {
+func (s *IssuesService) IsAssignee(ctx context.Context, owner, repo, user string) (bool, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/assignees/%v", owner, repo, user)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return false, nil, err
}
- resp, err := s.client.Do(req, nil)
+ resp, err := s.client.Do(ctx, req, nil)
assignee, err := parseBoolResponse(err)
return assignee, resp, err
}
@@ -48,7 +51,7 @@ func (s *IssuesService) IsAssignee(owner, repo, user string) (bool, *Response, e
// AddAssignees adds the provided GitHub users as assignees to the issue.
//
// GitHub API docs: https://developer.github.com/v3/issues/assignees/#add-assignees-to-an-issue
-func (s *IssuesService) AddAssignees(owner, repo string, number int, assignees []string) (*Issue, *Response, error) {
+func (s *IssuesService) AddAssignees(ctx context.Context, owner, repo string, number int, assignees []string) (*Issue, *Response, error) {
users := &struct {
Assignees []string `json:"assignees,omitempty"`
}{Assignees: assignees}
@@ -59,14 +62,14 @@ func (s *IssuesService) AddAssignees(owner, repo string, number int, assignees [
}
issue := &Issue{}
- resp, err := s.client.Do(req, issue)
+ resp, err := s.client.Do(ctx, req, issue)
return issue, resp, err
}
// RemoveAssignees removes the provided GitHub users as assignees from the issue.
//
// GitHub API docs: https://developer.github.com/v3/issues/assignees/#remove-assignees-from-an-issue
-func (s *IssuesService) RemoveAssignees(owner, repo string, number int, assignees []string) (*Issue, *Response, error) {
+func (s *IssuesService) RemoveAssignees(ctx context.Context, owner, repo string, number int, assignees []string) (*Issue, *Response, error) {
users := &struct {
Assignees []string `json:"assignees,omitempty"`
}{Assignees: assignees}
@@ -77,6 +80,6 @@ func (s *IssuesService) RemoveAssignees(owner, repo string, number int, assignee
}
issue := &Issue{}
- resp, err := s.client.Do(req, issue)
+ resp, err := s.client.Do(ctx, req, issue)
return issue, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/issues_comments.go b/vendor/github.com/google/go-github/github/issues_comments.go
index e8a852e..fd72657 100644
--- a/vendor/github.com/google/go-github/github/issues_comments.go
+++ b/vendor/github.com/google/go-github/github/issues_comments.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -46,7 +47,7 @@ type IssueListCommentsOptions struct {
// number of 0 will return all comments on all issues for the repository.
//
// GitHub API docs: https://developer.github.com/v3/issues/comments/#list-comments-on-an-issue
-func (s *IssuesService) ListComments(owner string, repo string, number int, opt *IssueListCommentsOptions) ([]*IssueComment, *Response, error) {
+func (s *IssuesService) ListComments(ctx context.Context, owner string, repo string, number int, opt *IssueListCommentsOptions) ([]*IssueComment, *Response, error) {
var u string
if number == 0 {
u = fmt.Sprintf("repos/%v/%v/issues/comments", owner, repo)
@@ -67,7 +68,7 @@ func (s *IssuesService) ListComments(owner string, repo string, number int, opt
req.Header.Set("Accept", mediaTypeReactionsPreview)
var comments []*IssueComment
- resp, err := s.client.Do(req, &comments)
+ resp, err := s.client.Do(ctx, req, &comments)
if err != nil {
return nil, resp, err
}
@@ -78,7 +79,7 @@ func (s *IssuesService) ListComments(owner string, repo string, number int, opt
// GetComment fetches the specified issue comment.
//
// GitHub API docs: https://developer.github.com/v3/issues/comments/#get-a-single-comment
-func (s *IssuesService) GetComment(owner string, repo string, id int) (*IssueComment, *Response, error) {
+func (s *IssuesService) GetComment(ctx context.Context, owner string, repo string, id int) (*IssueComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id)
req, err := s.client.NewRequest("GET", u, nil)
@@ -90,7 +91,7 @@ func (s *IssuesService) GetComment(owner string, repo string, id int) (*IssueCom
req.Header.Set("Accept", mediaTypeReactionsPreview)
comment := new(IssueComment)
- resp, err := s.client.Do(req, comment)
+ resp, err := s.client.Do(ctx, req, comment)
if err != nil {
return nil, resp, err
}
@@ -101,14 +102,14 @@ func (s *IssuesService) GetComment(owner string, repo string, id int) (*IssueCom
// CreateComment creates a new comment on the specified issue.
//
// GitHub API docs: https://developer.github.com/v3/issues/comments/#create-a-comment
-func (s *IssuesService) CreateComment(owner string, repo string, number int, comment *IssueComment) (*IssueComment, *Response, error) {
+func (s *IssuesService) CreateComment(ctx context.Context, owner string, repo string, number int, comment *IssueComment) (*IssueComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%d/comments", owner, repo, number)
req, err := s.client.NewRequest("POST", u, comment)
if err != nil {
return nil, nil, err
}
c := new(IssueComment)
- resp, err := s.client.Do(req, c)
+ resp, err := s.client.Do(ctx, req, c)
if err != nil {
return nil, resp, err
}
@@ -119,14 +120,14 @@ func (s *IssuesService) CreateComment(owner string, repo string, number int, com
// EditComment updates an issue comment.
//
// GitHub API docs: https://developer.github.com/v3/issues/comments/#edit-a-comment
-func (s *IssuesService) EditComment(owner string, repo string, id int, comment *IssueComment) (*IssueComment, *Response, error) {
+func (s *IssuesService) EditComment(ctx context.Context, owner string, repo string, id int, comment *IssueComment) (*IssueComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id)
req, err := s.client.NewRequest("PATCH", u, comment)
if err != nil {
return nil, nil, err
}
c := new(IssueComment)
- resp, err := s.client.Do(req, c)
+ resp, err := s.client.Do(ctx, req, c)
if err != nil {
return nil, resp, err
}
@@ -137,11 +138,11 @@ func (s *IssuesService) EditComment(owner string, repo string, id int, comment *
// DeleteComment deletes an issue comment.
//
// GitHub API docs: https://developer.github.com/v3/issues/comments/#delete-a-comment
-func (s *IssuesService) DeleteComment(owner string, repo string, id int) (*Response, error) {
+func (s *IssuesService) DeleteComment(ctx context.Context, owner string, repo string, id int) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/comments/%d", owner, repo, id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/issues_events.go b/vendor/github.com/google/go-github/github/issues_events.go
index 98f215c..2d5e19a 100644
--- a/vendor/github.com/google/go-github/github/issues_events.go
+++ b/vendor/github.com/google/go-github/github/issues_events.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -64,6 +65,7 @@ type IssueEvent struct {
// Only present on certain events; see above.
Assignee *User `json:"assignee,omitempty"`
+ Assigner *User `json:"assigner,omitempty"`
CommitID *string `json:"commit_id,omitempty"`
Milestone *Milestone `json:"milestone,omitempty"`
Label *Label `json:"label,omitempty"`
@@ -73,7 +75,7 @@ type IssueEvent struct {
// ListIssueEvents lists events for the specified issue.
//
// GitHub API docs: https://developer.github.com/v3/issues/events/#list-events-for-an-issue
-func (s *IssuesService) ListIssueEvents(owner, repo string, number int, opt *ListOptions) ([]*IssueEvent, *Response, error) {
+func (s *IssuesService) ListIssueEvents(ctx context.Context, owner, repo string, number int, opt *ListOptions) ([]*IssueEvent, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%v/events", owner, repo, number)
u, err := addOptions(u, opt)
if err != nil {
@@ -86,7 +88,7 @@ func (s *IssuesService) ListIssueEvents(owner, repo string, number int, opt *Lis
}
var events []*IssueEvent
- resp, err := s.client.Do(req, &events)
+ resp, err := s.client.Do(ctx, req, &events)
if err != nil {
return nil, resp, err
}
@@ -97,7 +99,7 @@ func (s *IssuesService) ListIssueEvents(owner, repo string, number int, opt *Lis
// ListRepositoryEvents lists events for the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/issues/events/#list-events-for-a-repository
-func (s *IssuesService) ListRepositoryEvents(owner, repo string, opt *ListOptions) ([]*IssueEvent, *Response, error) {
+func (s *IssuesService) ListRepositoryEvents(ctx context.Context, owner, repo string, opt *ListOptions) ([]*IssueEvent, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/events", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -110,7 +112,7 @@ func (s *IssuesService) ListRepositoryEvents(owner, repo string, opt *ListOption
}
var events []*IssueEvent
- resp, err := s.client.Do(req, &events)
+ resp, err := s.client.Do(ctx, req, &events)
if err != nil {
return nil, resp, err
}
@@ -121,7 +123,7 @@ func (s *IssuesService) ListRepositoryEvents(owner, repo string, opt *ListOption
// GetEvent returns the specified issue event.
//
// GitHub API docs: https://developer.github.com/v3/issues/events/#get-a-single-event
-func (s *IssuesService) GetEvent(owner, repo string, id int) (*IssueEvent, *Response, error) {
+func (s *IssuesService) GetEvent(ctx context.Context, owner, repo string, id int) (*IssueEvent, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/events/%v", owner, repo, id)
req, err := s.client.NewRequest("GET", u, nil)
@@ -130,7 +132,7 @@ func (s *IssuesService) GetEvent(owner, repo string, id int) (*IssueEvent, *Resp
}
event := new(IssueEvent)
- resp, err := s.client.Do(req, event)
+ resp, err := s.client.Do(ctx, req, event)
if err != nil {
return nil, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/issues_labels.go b/vendor/github.com/google/go-github/github/issues_labels.go
index c9f8c46..a39001d 100644
--- a/vendor/github.com/google/go-github/github/issues_labels.go
+++ b/vendor/github.com/google/go-github/github/issues_labels.go
@@ -5,10 +5,14 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// Label represents a GitHub label on an Issue
type Label struct {
+ ID *int `json:"id,omitempty"`
URL *string `json:"url,omitempty"`
Name *string `json:"name,omitempty"`
Color *string `json:"color,omitempty"`
@@ -21,7 +25,7 @@ func (l Label) String() string {
// ListLabels lists all labels for a repository.
//
// GitHub API docs: https://developer.github.com/v3/issues/labels/#list-all-labels-for-this-repository
-func (s *IssuesService) ListLabels(owner string, repo string, opt *ListOptions) ([]*Label, *Response, error) {
+func (s *IssuesService) ListLabels(ctx context.Context, owner string, repo string, opt *ListOptions) ([]*Label, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/labels", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -34,7 +38,7 @@ func (s *IssuesService) ListLabels(owner string, repo string, opt *ListOptions)
}
var labels []*Label
- resp, err := s.client.Do(req, &labels)
+ resp, err := s.client.Do(ctx, req, &labels)
if err != nil {
return nil, resp, err
}
@@ -45,7 +49,7 @@ func (s *IssuesService) ListLabels(owner string, repo string, opt *ListOptions)
// GetLabel gets a single label.
//
// GitHub API docs: https://developer.github.com/v3/issues/labels/#get-a-single-label
-func (s *IssuesService) GetLabel(owner string, repo string, name string) (*Label, *Response, error) {
+func (s *IssuesService) GetLabel(ctx context.Context, owner string, repo string, name string) (*Label, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -53,7 +57,7 @@ func (s *IssuesService) GetLabel(owner string, repo string, name string) (*Label
}
label := new(Label)
- resp, err := s.client.Do(req, label)
+ resp, err := s.client.Do(ctx, req, label)
if err != nil {
return nil, resp, err
}
@@ -64,7 +68,7 @@ func (s *IssuesService) GetLabel(owner string, repo string, name string) (*Label
// CreateLabel creates a new label on the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/issues/labels/#create-a-label
-func (s *IssuesService) CreateLabel(owner string, repo string, label *Label) (*Label, *Response, error) {
+func (s *IssuesService) CreateLabel(ctx context.Context, owner string, repo string, label *Label) (*Label, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/labels", owner, repo)
req, err := s.client.NewRequest("POST", u, label)
if err != nil {
@@ -72,7 +76,7 @@ func (s *IssuesService) CreateLabel(owner string, repo string, label *Label) (*L
}
l := new(Label)
- resp, err := s.client.Do(req, l)
+ resp, err := s.client.Do(ctx, req, l)
if err != nil {
return nil, resp, err
}
@@ -83,7 +87,7 @@ func (s *IssuesService) CreateLabel(owner string, repo string, label *Label) (*L
// EditLabel edits a label.
//
// GitHub API docs: https://developer.github.com/v3/issues/labels/#update-a-label
-func (s *IssuesService) EditLabel(owner string, repo string, name string, label *Label) (*Label, *Response, error) {
+func (s *IssuesService) EditLabel(ctx context.Context, owner string, repo string, name string, label *Label) (*Label, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name)
req, err := s.client.NewRequest("PATCH", u, label)
if err != nil {
@@ -91,7 +95,7 @@ func (s *IssuesService) EditLabel(owner string, repo string, name string, label
}
l := new(Label)
- resp, err := s.client.Do(req, l)
+ resp, err := s.client.Do(ctx, req, l)
if err != nil {
return nil, resp, err
}
@@ -102,19 +106,19 @@ func (s *IssuesService) EditLabel(owner string, repo string, name string, label
// DeleteLabel deletes a label.
//
// GitHub API docs: https://developer.github.com/v3/issues/labels/#delete-a-label
-func (s *IssuesService) DeleteLabel(owner string, repo string, name string) (*Response, error) {
+func (s *IssuesService) DeleteLabel(ctx context.Context, owner string, repo string, name string) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/labels/%v", owner, repo, name)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// ListLabelsByIssue lists all labels for an issue.
//
// GitHub API docs: https://developer.github.com/v3/issues/labels/#list-labels-on-an-issue
-func (s *IssuesService) ListLabelsByIssue(owner string, repo string, number int, opt *ListOptions) ([]*Label, *Response, error) {
+func (s *IssuesService) ListLabelsByIssue(ctx context.Context, owner string, repo string, number int, opt *ListOptions) ([]*Label, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number)
u, err := addOptions(u, opt)
if err != nil {
@@ -127,7 +131,7 @@ func (s *IssuesService) ListLabelsByIssue(owner string, repo string, number int,
}
var labels []*Label
- resp, err := s.client.Do(req, &labels)
+ resp, err := s.client.Do(ctx, req, &labels)
if err != nil {
return nil, resp, err
}
@@ -138,7 +142,7 @@ func (s *IssuesService) ListLabelsByIssue(owner string, repo string, number int,
// AddLabelsToIssue adds labels to an issue.
//
// GitHub API docs: https://developer.github.com/v3/issues/labels/#add-labels-to-an-issue
-func (s *IssuesService) AddLabelsToIssue(owner string, repo string, number int, labels []string) ([]*Label, *Response, error) {
+func (s *IssuesService) AddLabelsToIssue(ctx context.Context, owner string, repo string, number int, labels []string) ([]*Label, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number)
req, err := s.client.NewRequest("POST", u, labels)
if err != nil {
@@ -146,7 +150,7 @@ func (s *IssuesService) AddLabelsToIssue(owner string, repo string, number int,
}
var l []*Label
- resp, err := s.client.Do(req, &l)
+ resp, err := s.client.Do(ctx, req, &l)
if err != nil {
return nil, resp, err
}
@@ -157,19 +161,19 @@ func (s *IssuesService) AddLabelsToIssue(owner string, repo string, number int,
// RemoveLabelForIssue removes a label for an issue.
//
// GitHub API docs: https://developer.github.com/v3/issues/labels/#remove-a-label-from-an-issue
-func (s *IssuesService) RemoveLabelForIssue(owner string, repo string, number int, label string) (*Response, error) {
+func (s *IssuesService) RemoveLabelForIssue(ctx context.Context, owner string, repo string, number int, label string) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%d/labels/%v", owner, repo, number, label)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// ReplaceLabelsForIssue replaces all labels for an issue.
//
// GitHub API docs: https://developer.github.com/v3/issues/labels/#replace-all-labels-for-an-issue
-func (s *IssuesService) ReplaceLabelsForIssue(owner string, repo string, number int, labels []string) ([]*Label, *Response, error) {
+func (s *IssuesService) ReplaceLabelsForIssue(ctx context.Context, owner string, repo string, number int, labels []string) ([]*Label, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number)
req, err := s.client.NewRequest("PUT", u, labels)
if err != nil {
@@ -177,7 +181,7 @@ func (s *IssuesService) ReplaceLabelsForIssue(owner string, repo string, number
}
var l []*Label
- resp, err := s.client.Do(req, &l)
+ resp, err := s.client.Do(ctx, req, &l)
if err != nil {
return nil, resp, err
}
@@ -188,19 +192,19 @@ func (s *IssuesService) ReplaceLabelsForIssue(owner string, repo string, number
// RemoveLabelsForIssue removes all labels for an issue.
//
// GitHub API docs: https://developer.github.com/v3/issues/labels/#remove-all-labels-from-an-issue
-func (s *IssuesService) RemoveLabelsForIssue(owner string, repo string, number int) (*Response, error) {
+func (s *IssuesService) RemoveLabelsForIssue(ctx context.Context, owner string, repo string, number int) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%d/labels", owner, repo, number)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// ListLabelsForMilestone lists labels for every issue in a milestone.
//
// GitHub API docs: https://developer.github.com/v3/issues/labels/#get-labels-for-every-issue-in-a-milestone
-func (s *IssuesService) ListLabelsForMilestone(owner string, repo string, number int, opt *ListOptions) ([]*Label, *Response, error) {
+func (s *IssuesService) ListLabelsForMilestone(ctx context.Context, owner string, repo string, number int, opt *ListOptions) ([]*Label, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/milestones/%d/labels", owner, repo, number)
u, err := addOptions(u, opt)
if err != nil {
@@ -213,7 +217,7 @@ func (s *IssuesService) ListLabelsForMilestone(owner string, repo string, number
}
var labels []*Label
- resp, err := s.client.Do(req, &labels)
+ resp, err := s.client.Do(ctx, req, &labels)
if err != nil {
return nil, resp, err
}
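The hunks above show the headline change in this go-github update: every IssuesService method now takes a context.Context as its first argument and passes it through to client.Do. A minimal sketch of a caller adapting to the new signatures follows; the owner and repository names are placeholders, not part of the diff.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/google/go-github/github"
)

func main() {
	ctx := context.Background()
	client := github.NewClient(nil) // unauthenticated client; fine for public data

	// ListLabels now requires a context as its first argument.
	labels, _, err := client.Issues.ListLabels(ctx, "example-owner", "example-repo", &github.ListOptions{PerPage: 10})
	if err != nil {
		log.Fatal(err)
	}
	for _, l := range labels {
		fmt.Println(l) // Label implements Stringer
	}
}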
diff --git a/vendor/github.com/google/go-github/github/issues_milestones.go b/vendor/github.com/google/go-github/github/issues_milestones.go
index 0cc2d58..e6e882d 100644
--- a/vendor/github.com/google/go-github/github/issues_milestones.go
+++ b/vendor/github.com/google/go-github/github/issues_milestones.go
@@ -6,11 +6,12 @@
package github
import (
+ "context"
"fmt"
"time"
)
-// Milestone represents a Github repository milestone.
+// Milestone represents a GitHub repository milestone.
type Milestone struct {
URL *string `json:"url,omitempty"`
HTMLURL *string `json:"html_url,omitempty"`
@@ -37,11 +38,11 @@ func (m Milestone) String() string {
// IssuesService.ListMilestones method.
type MilestoneListOptions struct {
// State filters milestones based on their state. Possible values are:
- // open, closed. Default is "open".
+ // open, closed, all. Default is "open".
State string `url:"state,omitempty"`
- // Sort specifies how to sort milestones. Possible values are: due_date, completeness.
- // Default value is "due_date".
+ // Sort specifies how to sort milestones. Possible values are: due_on, completeness.
+ // Default value is "due_on".
Sort string `url:"sort,omitempty"`
// Direction in which to sort milestones. Possible values are: asc, desc.
@@ -54,7 +55,7 @@ type MilestoneListOptions struct {
// ListMilestones lists all milestones for a repository.
//
// GitHub API docs: https://developer.github.com/v3/issues/milestones/#list-milestones-for-a-repository
-func (s *IssuesService) ListMilestones(owner string, repo string, opt *MilestoneListOptions) ([]*Milestone, *Response, error) {
+func (s *IssuesService) ListMilestones(ctx context.Context, owner string, repo string, opt *MilestoneListOptions) ([]*Milestone, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/milestones", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -67,7 +68,7 @@ func (s *IssuesService) ListMilestones(owner string, repo string, opt *Milestone
}
var milestones []*Milestone
- resp, err := s.client.Do(req, &milestones)
+ resp, err := s.client.Do(ctx, req, &milestones)
if err != nil {
return nil, resp, err
}
@@ -78,7 +79,7 @@ func (s *IssuesService) ListMilestones(owner string, repo string, opt *Milestone
// GetMilestone gets a single milestone.
//
// GitHub API docs: https://developer.github.com/v3/issues/milestones/#get-a-single-milestone
-func (s *IssuesService) GetMilestone(owner string, repo string, number int) (*Milestone, *Response, error) {
+func (s *IssuesService) GetMilestone(ctx context.Context, owner string, repo string, number int) (*Milestone, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -86,7 +87,7 @@ func (s *IssuesService) GetMilestone(owner string, repo string, number int) (*Mi
}
milestone := new(Milestone)
- resp, err := s.client.Do(req, milestone)
+ resp, err := s.client.Do(ctx, req, milestone)
if err != nil {
return nil, resp, err
}
@@ -97,7 +98,7 @@ func (s *IssuesService) GetMilestone(owner string, repo string, number int) (*Mi
// CreateMilestone creates a new milestone on the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/issues/milestones/#create-a-milestone
-func (s *IssuesService) CreateMilestone(owner string, repo string, milestone *Milestone) (*Milestone, *Response, error) {
+func (s *IssuesService) CreateMilestone(ctx context.Context, owner string, repo string, milestone *Milestone) (*Milestone, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/milestones", owner, repo)
req, err := s.client.NewRequest("POST", u, milestone)
if err != nil {
@@ -105,7 +106,7 @@ func (s *IssuesService) CreateMilestone(owner string, repo string, milestone *Mi
}
m := new(Milestone)
- resp, err := s.client.Do(req, m)
+ resp, err := s.client.Do(ctx, req, m)
if err != nil {
return nil, resp, err
}
@@ -116,7 +117,7 @@ func (s *IssuesService) CreateMilestone(owner string, repo string, milestone *Mi
// EditMilestone edits a milestone.
//
// GitHub API docs: https://developer.github.com/v3/issues/milestones/#update-a-milestone
-func (s *IssuesService) EditMilestone(owner string, repo string, number int, milestone *Milestone) (*Milestone, *Response, error) {
+func (s *IssuesService) EditMilestone(ctx context.Context, owner string, repo string, number int, milestone *Milestone) (*Milestone, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number)
req, err := s.client.NewRequest("PATCH", u, milestone)
if err != nil {
@@ -124,7 +125,7 @@ func (s *IssuesService) EditMilestone(owner string, repo string, number int, mil
}
m := new(Milestone)
- resp, err := s.client.Do(req, m)
+ resp, err := s.client.Do(ctx, req, m)
if err != nil {
return nil, resp, err
}
@@ -135,12 +136,12 @@ func (s *IssuesService) EditMilestone(owner string, repo string, number int, mil
// DeleteMilestone deletes a milestone.
//
// GitHub API docs: https://developer.github.com/v3/issues/milestones/#delete-a-milestone
-func (s *IssuesService) DeleteMilestone(owner string, repo string, number int) (*Response, error) {
+func (s *IssuesService) DeleteMilestone(ctx context.Context, owner string, repo string, number int) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/milestones/%d", owner, repo, number)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
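Besides the context parameter, the milestone docs above now list "all" as a valid State and correct the Sort values to due_on/completeness. A hedged sketch of ListMilestones with those options, reusing the ctx and client from the previous sketch and placeholder owner/repo names:

// Assumes ctx and client are set up as in the previous sketch.
opt := &github.MilestoneListOptions{
	State:     "all",    // open, closed, all
	Sort:      "due_on", // due_on or completeness
	Direction: "asc",
}
milestones, _, err := client.Issues.ListMilestones(ctx, "example-owner", "example-repo", opt)
if err != nil {
	log.Fatal(err)
}
for _, m := range milestones {
	fmt.Println(m) // Milestone implements Stringer
}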
diff --git a/vendor/github.com/google/go-github/github/issues_timeline.go b/vendor/github.com/google/go-github/github/issues_timeline.go
index d20eef8..bc0b108 100644
--- a/vendor/github.com/google/go-github/github/issues_timeline.go
+++ b/vendor/github.com/google/go-github/github/issues_timeline.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -127,7 +128,7 @@ type Source struct {
// ListIssueTimeline lists events for the specified issue.
//
// GitHub API docs: https://developer.github.com/v3/issues/timeline/#list-events-for-an-issue
-func (s *IssuesService) ListIssueTimeline(owner, repo string, number int, opt *ListOptions) ([]*Timeline, *Response, error) {
+func (s *IssuesService) ListIssueTimeline(ctx context.Context, owner, repo string, number int, opt *ListOptions) ([]*Timeline, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%v/timeline", owner, repo, number)
u, err := addOptions(u, opt)
if err != nil {
@@ -143,6 +144,6 @@ func (s *IssuesService) ListIssueTimeline(owner, repo string, number int, opt *L
req.Header.Set("Accept", mediaTypeTimelinePreview)
var events []*Timeline
- resp, err := s.client.Do(req, &events)
+ resp, err := s.client.Do(ctx, req, &events)
return events, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/licenses.go b/vendor/github.com/google/go-github/github/licenses.go
index 5340e61..e9cd177 100644
--- a/vendor/github.com/google/go-github/github/licenses.go
+++ b/vendor/github.com/google/go-github/github/licenses.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// LicensesService handles communication with the license related
// methods of the GitHub API.
@@ -58,7 +61,7 @@ func (l License) String() string {
// List popular open source licenses.
//
// GitHub API docs: https://developer.github.com/v3/licenses/#list-all-licenses
-func (s *LicensesService) List() ([]*License, *Response, error) {
+func (s *LicensesService) List(ctx context.Context) ([]*License, *Response, error) {
req, err := s.client.NewRequest("GET", "licenses", nil)
if err != nil {
return nil, nil, err
@@ -68,7 +71,7 @@ func (s *LicensesService) List() ([]*License, *Response, error) {
req.Header.Set("Accept", mediaTypeLicensesPreview)
var licenses []*License
- resp, err := s.client.Do(req, &licenses)
+ resp, err := s.client.Do(ctx, req, &licenses)
if err != nil {
return nil, resp, err
}
@@ -79,7 +82,7 @@ func (s *LicensesService) List() ([]*License, *Response, error) {
// Get extended metadata for one license.
//
// GitHub API docs: https://developer.github.com/v3/licenses/#get-an-individual-license
-func (s *LicensesService) Get(licenseName string) (*License, *Response, error) {
+func (s *LicensesService) Get(ctx context.Context, licenseName string) (*License, *Response, error) {
u := fmt.Sprintf("licenses/%s", licenseName)
req, err := s.client.NewRequest("GET", u, nil)
@@ -91,7 +94,7 @@ func (s *LicensesService) Get(licenseName string) (*License, *Response, error) {
req.Header.Set("Accept", mediaTypeLicensesPreview)
license := new(License)
- resp, err := s.client.Do(req, license)
+ resp, err := s.client.Do(ctx, req, license)
if err != nil {
return nil, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/messages.go b/vendor/github.com/google/go-github/github/messages.go
index 5f67ba5..a7ec65f 100644
--- a/vendor/github.com/google/go-github/github/messages.go
+++ b/vendor/github.com/google/go-github/github/messages.go
@@ -4,7 +4,7 @@
// license that can be found in the LICENSE file.
// This file provides functions for validating payloads from GitHub Webhooks.
-// GitHub docs: https://developer.github.com/webhooks/securing/#validating-payloads-from-github
+// GitHub API docs: https://developer.github.com/webhooks/securing/#validating-payloads-from-github
package github
@@ -31,7 +31,7 @@ const (
sha512Prefix = "sha512"
// signatureHeader is the GitHub header key used to pass the HMAC hexdigest.
signatureHeader = "X-Hub-Signature"
- // eventTypeHeader is the Github header key used to pass the event type.
+ // eventTypeHeader is the GitHub header key used to pass the event type.
eventTypeHeader = "X-Github-Event"
)
@@ -56,6 +56,9 @@ var (
"organization": "OrganizationEvent",
"page_build": "PageBuildEvent",
"ping": "PingEvent",
+ "project": "ProjectEvent",
+ "project_card": "ProjectCardEvent",
+ "project_column": "ProjectColumnEvent",
"public": "PublicEvent",
"pull_request_review": "PullRequestReviewEvent",
"pull_request_review_comment": "PullRequestReviewCommentEvent",
@@ -143,7 +146,7 @@ func ValidatePayload(r *http.Request, secretKey []byte) (payload []byte, err err
// payload is the JSON payload sent by GitHub Webhooks.
// secretKey is the GitHub Webhook secret message.
//
-// GitHub docs: https://developer.github.com/webhooks/securing/#validating-payloads-from-github
+// GitHub API docs: https://developer.github.com/webhooks/securing/#validating-payloads-from-github
func validateSignature(signature string, payload, secretKey []byte) error {
messageMAC, hashFunc, err := messageMAC(signature)
if err != nil {
@@ -162,7 +165,7 @@ func WebHookType(r *http.Request) string {
// ParseWebHook parses the event payload. For recognized event types, a
// value of the corresponding struct type will be returned (as returned
-// by Event.Payload()). An error will be returned for unrecognized event
+// by Event.ParsePayload()). An error will be returned for unrecognized event
// types.
//
// Example usage:
@@ -191,5 +194,5 @@ func ParseWebHook(messageType string, payload []byte) (interface{}, error) {
Type: &eventType,
RawPayload: (*json.RawMessage)(&payload),
}
- return event.Payload(), nil
+ return event.ParsePayload()
}
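messages.go now routes project, project_card and project_column webhooks and makes ParseWebHook return event.ParsePayload() directly, so payload parse errors reach the caller instead of being swallowed. A sketch of a webhook handler built on these helpers; the secret value and the event handling are illustrative only, and the usual net/http, log and github imports are assumed.

// Hypothetical webhook handler; the secret is a placeholder.
func handleWebhook(w http.ResponseWriter, r *http.Request) {
	payload, err := github.ValidatePayload(r, []byte("webhook-secret"))
	if err != nil {
		http.Error(w, "invalid signature", http.StatusBadRequest)
		return
	}
	event, err := github.ParseWebHook(github.WebHookType(r), payload)
	if err != nil {
		// With this change, parse errors are returned rather than ignored.
		http.Error(w, "cannot parse payload", http.StatusBadRequest)
		return
	}
	switch e := event.(type) {
	case *github.ProjectEvent, *github.ProjectCardEvent, *github.ProjectColumnEvent:
		log.Printf("project-related event: %T", e)
	default:
		log.Printf("event: %T", e)
	}
}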
diff --git a/vendor/github.com/google/go-github/github/migrations.go b/vendor/github.com/google/go-github/github/migrations.go
index a7890b0..6793269 100644
--- a/vendor/github.com/google/go-github/github/migrations.go
+++ b/vendor/github.com/google/go-github/github/migrations.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"errors"
"fmt"
"net/http"
@@ -74,7 +75,7 @@ type startMigration struct {
// repos is a slice of repository names to migrate.
//
// GitHub API docs: https://developer.github.com/v3/migration/migrations/#start-a-migration
-func (s *MigrationService) StartMigration(org string, repos []string, opt *MigrationOptions) (*Migration, *Response, error) {
+func (s *MigrationService) StartMigration(ctx context.Context, org string, repos []string, opt *MigrationOptions) (*Migration, *Response, error) {
u := fmt.Sprintf("orgs/%v/migrations", org)
body := &startMigration{Repositories: repos}
@@ -92,7 +93,7 @@ func (s *MigrationService) StartMigration(org string, repos []string, opt *Migra
req.Header.Set("Accept", mediaTypeMigrationsPreview)
m := &Migration{}
- resp, err := s.client.Do(req, m)
+ resp, err := s.client.Do(ctx, req, m)
if err != nil {
return nil, resp, err
}
@@ -103,7 +104,7 @@ func (s *MigrationService) StartMigration(org string, repos []string, opt *Migra
// ListMigrations lists the most recent migrations.
//
// GitHub API docs: https://developer.github.com/v3/migration/migrations/#get-a-list-of-migrations
-func (s *MigrationService) ListMigrations(org string) ([]*Migration, *Response, error) {
+func (s *MigrationService) ListMigrations(ctx context.Context, org string) ([]*Migration, *Response, error) {
u := fmt.Sprintf("orgs/%v/migrations", org)
req, err := s.client.NewRequest("GET", u, nil)
@@ -115,7 +116,7 @@ func (s *MigrationService) ListMigrations(org string) ([]*Migration, *Response,
req.Header.Set("Accept", mediaTypeMigrationsPreview)
var m []*Migration
- resp, err := s.client.Do(req, &m)
+ resp, err := s.client.Do(ctx, req, &m)
if err != nil {
return nil, resp, err
}
@@ -127,7 +128,7 @@ func (s *MigrationService) ListMigrations(org string) ([]*Migration, *Response,
// id is the migration ID.
//
// GitHub API docs: https://developer.github.com/v3/migration/migrations/#get-the-status-of-a-migration
-func (s *MigrationService) MigrationStatus(org string, id int) (*Migration, *Response, error) {
+func (s *MigrationService) MigrationStatus(ctx context.Context, org string, id int) (*Migration, *Response, error) {
u := fmt.Sprintf("orgs/%v/migrations/%v", org, id)
req, err := s.client.NewRequest("GET", u, nil)
@@ -139,7 +140,7 @@ func (s *MigrationService) MigrationStatus(org string, id int) (*Migration, *Res
req.Header.Set("Accept", mediaTypeMigrationsPreview)
m := &Migration{}
- resp, err := s.client.Do(req, m)
+ resp, err := s.client.Do(ctx, req, m)
if err != nil {
return nil, resp, err
}
@@ -151,7 +152,7 @@ func (s *MigrationService) MigrationStatus(org string, id int) (*Migration, *Res
// id is the migration ID.
//
// GitHub API docs: https://developer.github.com/v3/migration/migrations/#download-a-migration-archive
-func (s *MigrationService) MigrationArchiveURL(org string, id int) (url string, err error) {
+func (s *MigrationService) MigrationArchiveURL(ctx context.Context, org string, id int) (url string, err error) {
u := fmt.Sprintf("orgs/%v/migrations/%v/archive", org, id)
req, err := s.client.NewRequest("GET", u, nil)
@@ -174,7 +175,7 @@ func (s *MigrationService) MigrationArchiveURL(org string, id int) (url string,
}
defer func() { s.client.client.CheckRedirect = saveRedirect }()
- _, err = s.client.Do(req, nil) // expect error from disable redirect
+ _, err = s.client.Do(ctx, req, nil) // expect error from disable redirect
if err == nil {
return "", errors.New("expected redirect, none provided")
}
@@ -188,7 +189,7 @@ func (s *MigrationService) MigrationArchiveURL(org string, id int) (url string,
// id is the migration ID.
//
// GitHub API docs: https://developer.github.com/v3/migration/migrations/#delete-a-migration-archive
-func (s *MigrationService) DeleteMigration(org string, id int) (*Response, error) {
+func (s *MigrationService) DeleteMigration(ctx context.Context, org string, id int) (*Response, error) {
u := fmt.Sprintf("orgs/%v/migrations/%v/archive", org, id)
req, err := s.client.NewRequest("DELETE", u, nil)
@@ -199,7 +200,7 @@ func (s *MigrationService) DeleteMigration(org string, id int) (*Response, error
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeMigrationsPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// UnlockRepo unlocks a repository that was locked for migration.
@@ -208,7 +209,7 @@ func (s *MigrationService) DeleteMigration(org string, id int) (*Response, error
// is complete and you no longer need the source data.
//
// GitHub API docs: https://developer.github.com/v3/migration/migrations/#unlock-a-repository
-func (s *MigrationService) UnlockRepo(org string, id int, repo string) (*Response, error) {
+func (s *MigrationService) UnlockRepo(ctx context.Context, org string, id int, repo string) (*Response, error) {
u := fmt.Sprintf("orgs/%v/migrations/%v/repos/%v/lock", org, id, repo)
req, err := s.client.NewRequest("DELETE", u, nil)
@@ -219,5 +220,5 @@ func (s *MigrationService) UnlockRepo(org string, id int, repo string) (*Respons
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeMigrationsPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
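MigrationService gains the same context support; note that MigrationArchiveURL still works by temporarily disabling redirect following and expecting a redirect error, as the hunk above shows. A sketch of polling a migration and fetching its archive URL, assuming ctx and client as before; the organization name and migration ID are placeholders, and the "exported" state check follows the migrations API convention.

m, _, err := client.Migrations.MigrationStatus(ctx, "example-org", 1)
if err != nil {
	log.Fatal(err)
}
if m.State != nil && *m.State == "exported" {
	archiveURL, err := client.Migrations.MigrationArchiveURL(ctx, "example-org", 1)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("download archive from %s", archiveURL)
}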
diff --git a/vendor/github.com/google/go-github/github/migrations_source_import.go b/vendor/github.com/google/go-github/github/migrations_source_import.go
index d593e40..aa45a5a 100644
--- a/vendor/github.com/google/go-github/github/migrations_source_import.go
+++ b/vendor/github.com/google/go-github/github/migrations_source_import.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// Import represents a repository import request.
type Import struct {
@@ -144,7 +147,7 @@ func (f LargeFile) String() string {
// StartImport initiates a repository import.
//
// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#start-an-import
-func (s *MigrationService) StartImport(owner, repo string, in *Import) (*Import, *Response, error) {
+func (s *MigrationService) StartImport(ctx context.Context, owner, repo string, in *Import) (*Import, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
req, err := s.client.NewRequest("PUT", u, in)
if err != nil {
@@ -155,7 +158,7 @@ func (s *MigrationService) StartImport(owner, repo string, in *Import) (*Import,
req.Header.Set("Accept", mediaTypeImportPreview)
out := new(Import)
- resp, err := s.client.Do(req, out)
+ resp, err := s.client.Do(ctx, req, out)
if err != nil {
return nil, resp, err
}
@@ -166,7 +169,7 @@ func (s *MigrationService) StartImport(owner, repo string, in *Import) (*Import,
// ImportProgress queries for the status and progress of an ongoing repository import.
//
// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-import-progress
-func (s *MigrationService) ImportProgress(owner, repo string) (*Import, *Response, error) {
+func (s *MigrationService) ImportProgress(ctx context.Context, owner, repo string) (*Import, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -177,7 +180,7 @@ func (s *MigrationService) ImportProgress(owner, repo string) (*Import, *Respons
req.Header.Set("Accept", mediaTypeImportPreview)
out := new(Import)
- resp, err := s.client.Do(req, out)
+ resp, err := s.client.Do(ctx, req, out)
if err != nil {
return nil, resp, err
}
@@ -188,7 +191,7 @@ func (s *MigrationService) ImportProgress(owner, repo string) (*Import, *Respons
// UpdateImport initiates a repository import.
//
// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#update-existing-import
-func (s *MigrationService) UpdateImport(owner, repo string, in *Import) (*Import, *Response, error) {
+func (s *MigrationService) UpdateImport(ctx context.Context, owner, repo string, in *Import) (*Import, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
req, err := s.client.NewRequest("PATCH", u, in)
if err != nil {
@@ -199,7 +202,7 @@ func (s *MigrationService) UpdateImport(owner, repo string, in *Import) (*Import
req.Header.Set("Accept", mediaTypeImportPreview)
out := new(Import)
- resp, err := s.client.Do(req, out)
+ resp, err := s.client.Do(ctx, req, out)
if err != nil {
return nil, resp, err
}
@@ -220,7 +223,7 @@ func (s *MigrationService) UpdateImport(owner, repo string, in *Import) (*Import
// information.
//
// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-commit-authors
-func (s *MigrationService) CommitAuthors(owner, repo string) ([]*SourceImportAuthor, *Response, error) {
+func (s *MigrationService) CommitAuthors(ctx context.Context, owner, repo string) ([]*SourceImportAuthor, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/import/authors", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -231,7 +234,7 @@ func (s *MigrationService) CommitAuthors(owner, repo string) ([]*SourceImportAut
req.Header.Set("Accept", mediaTypeImportPreview)
var authors []*SourceImportAuthor
- resp, err := s.client.Do(req, &authors)
+ resp, err := s.client.Do(ctx, req, &authors)
if err != nil {
return nil, resp, err
}
@@ -244,7 +247,7 @@ func (s *MigrationService) CommitAuthors(owner, repo string) ([]*SourceImportAut
// commits to the repository.
//
// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#map-a-commit-author
-func (s *MigrationService) MapCommitAuthor(owner, repo string, id int, author *SourceImportAuthor) (*SourceImportAuthor, *Response, error) {
+func (s *MigrationService) MapCommitAuthor(ctx context.Context, owner, repo string, id int, author *SourceImportAuthor) (*SourceImportAuthor, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/import/authors/%v", owner, repo, id)
req, err := s.client.NewRequest("PATCH", u, author)
if err != nil {
@@ -255,7 +258,7 @@ func (s *MigrationService) MapCommitAuthor(owner, repo string, id int, author *S
req.Header.Set("Accept", mediaTypeImportPreview)
out := new(SourceImportAuthor)
- resp, err := s.client.Do(req, out)
+ resp, err := s.client.Do(ctx, req, out)
if err != nil {
return nil, resp, err
}
@@ -268,7 +271,7 @@ func (s *MigrationService) MapCommitAuthor(owner, repo string, id int, author *S
// used.
//
// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#set-git-lfs-preference
-func (s *MigrationService) SetLFSPreference(owner, repo string, in *Import) (*Import, *Response, error) {
+func (s *MigrationService) SetLFSPreference(ctx context.Context, owner, repo string, in *Import) (*Import, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/import/lfs", owner, repo)
req, err := s.client.NewRequest("PATCH", u, in)
if err != nil {
@@ -279,7 +282,7 @@ func (s *MigrationService) SetLFSPreference(owner, repo string, in *Import) (*Im
req.Header.Set("Accept", mediaTypeImportPreview)
out := new(Import)
- resp, err := s.client.Do(req, out)
+ resp, err := s.client.Do(ctx, req, out)
if err != nil {
return nil, resp, err
}
@@ -290,7 +293,7 @@ func (s *MigrationService) SetLFSPreference(owner, repo string, in *Import) (*Im
// LargeFiles lists files larger than 100MB found during the import.
//
// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#get-large-files
-func (s *MigrationService) LargeFiles(owner, repo string) ([]*LargeFile, *Response, error) {
+func (s *MigrationService) LargeFiles(ctx context.Context, owner, repo string) ([]*LargeFile, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/import/large_files", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -301,7 +304,7 @@ func (s *MigrationService) LargeFiles(owner, repo string) ([]*LargeFile, *Respon
req.Header.Set("Accept", mediaTypeImportPreview)
var files []*LargeFile
- resp, err := s.client.Do(req, &files)
+ resp, err := s.client.Do(ctx, req, &files)
if err != nil {
return nil, resp, err
}
@@ -312,7 +315,7 @@ func (s *MigrationService) LargeFiles(owner, repo string) ([]*LargeFile, *Respon
// CancelImport stops an import for a repository.
//
// GitHub API docs: https://developer.github.com/v3/migration/source_imports/#cancel-an-import
-func (s *MigrationService) CancelImport(owner, repo string) (*Response, error) {
+func (s *MigrationService) CancelImport(ctx context.Context, owner, repo string) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/import", owner, repo)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
@@ -322,5 +325,5 @@ func (s *MigrationService) CancelImport(owner, repo string) (*Response, error) {
// TODO: remove custom Accept header when this API fully launches
req.Header.Set("Accept", mediaTypeImportPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/misc.go b/vendor/github.com/google/go-github/github/misc.go
index 0cdb3f7..42d0d30 100644
--- a/vendor/github.com/google/go-github/github/misc.go
+++ b/vendor/github.com/google/go-github/github/misc.go
@@ -7,6 +7,7 @@ package github
import (
"bytes"
+ "context"
"fmt"
"net/url"
)
@@ -39,7 +40,7 @@ type markdownRequest struct {
// Markdown renders an arbitrary Markdown document.
//
// GitHub API docs: https://developer.github.com/v3/markdown/
-func (c *Client) Markdown(text string, opt *MarkdownOptions) (string, *Response, error) {
+func (c *Client) Markdown(ctx context.Context, text string, opt *MarkdownOptions) (string, *Response, error) {
request := &markdownRequest{Text: String(text)}
if opt != nil {
if opt.Mode != "" {
@@ -56,7 +57,7 @@ func (c *Client) Markdown(text string, opt *MarkdownOptions) (string, *Response,
}
buf := new(bytes.Buffer)
- resp, err := c.Do(req, buf)
+ resp, err := c.Do(ctx, req, buf)
if err != nil {
return "", resp, err
}
@@ -67,14 +68,14 @@ func (c *Client) Markdown(text string, opt *MarkdownOptions) (string, *Response,
// ListEmojis returns the emojis available to use on GitHub.
//
// GitHub API docs: https://developer.github.com/v3/emojis/
-func (c *Client) ListEmojis() (map[string]string, *Response, error) {
+func (c *Client) ListEmojis(ctx context.Context) (map[string]string, *Response, error) {
req, err := c.NewRequest("GET", "emojis", nil)
if err != nil {
return nil, nil, err
}
var emoji map[string]string
- resp, err := c.Do(req, &emoji)
+ resp, err := c.Do(ctx, req, &emoji)
if err != nil {
return nil, resp, err
}
@@ -109,14 +110,14 @@ type APIMeta struct {
// endpoint provides information about that installation.
//
// GitHub API docs: https://developer.github.com/v3/meta/
-func (c *Client) APIMeta() (*APIMeta, *Response, error) {
+func (c *Client) APIMeta(ctx context.Context) (*APIMeta, *Response, error) {
req, err := c.NewRequest("GET", "meta", nil)
if err != nil {
return nil, nil, err
}
meta := new(APIMeta)
- resp, err := c.Do(req, meta)
+ resp, err := c.Do(ctx, req, meta)
if err != nil {
return nil, resp, err
}
@@ -126,7 +127,7 @@ func (c *Client) APIMeta() (*APIMeta, *Response, error) {
// Octocat returns an ASCII art octocat with the specified message in a speech
// bubble. If message is empty, a random zen phrase is used.
-func (c *Client) Octocat(message string) (string, *Response, error) {
+func (c *Client) Octocat(ctx context.Context, message string) (string, *Response, error) {
u := "octocat"
if message != "" {
u = fmt.Sprintf("%s?s=%s", u, url.QueryEscape(message))
@@ -138,7 +139,7 @@ func (c *Client) Octocat(message string) (string, *Response, error) {
}
buf := new(bytes.Buffer)
- resp, err := c.Do(req, buf)
+ resp, err := c.Do(ctx, req, buf)
if err != nil {
return "", resp, err
}
@@ -149,14 +150,14 @@ func (c *Client) Octocat(message string) (string, *Response, error) {
// Zen returns a random line from The Zen of GitHub.
//
// see also: http://warpspire.com/posts/taste/
-func (c *Client) Zen() (string, *Response, error) {
+func (c *Client) Zen(ctx context.Context) (string, *Response, error) {
req, err := c.NewRequest("GET", "zen", nil)
if err != nil {
return "", nil, err
}
buf := new(bytes.Buffer)
- resp, err := c.Do(req, buf)
+ resp, err := c.Do(ctx, req, buf)
if err != nil {
return "", resp, err
}
@@ -180,7 +181,7 @@ func (s *ServiceHook) String() string {
// ListServiceHooks lists all of the available service hooks.
//
// GitHub API docs: https://developer.github.com/webhooks/#services
-func (c *Client) ListServiceHooks() ([]*ServiceHook, *Response, error) {
+func (c *Client) ListServiceHooks(ctx context.Context) ([]*ServiceHook, *Response, error) {
u := "hooks"
req, err := c.NewRequest("GET", u, nil)
if err != nil {
@@ -188,7 +189,7 @@ func (c *Client) ListServiceHooks() ([]*ServiceHook, *Response, error) {
}
var hooks []*ServiceHook
- resp, err := c.Do(req, &hooks)
+ resp, err := c.Do(ctx, req, &hooks)
if err != nil {
return nil, resp, err
}
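The client-level helpers in misc.go (Markdown, ListEmojis, APIMeta, Octocat, Zen) follow the same pattern: a context flows into c.Do. Rendering Markdown might look like this sketch, again reusing ctx and client from the earlier example:

// Render GitHub-flavored Markdown; the input text is arbitrary.
html, _, err := client.Markdown(ctx, "Hello **GitHub**", &github.MarkdownOptions{Mode: "gfm"})
if err != nil {
	log.Fatal(err)
}
fmt.Println(html)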
diff --git a/vendor/github.com/google/go-github/github/orgs.go b/vendor/github.com/google/go-github/github/orgs.go
index 7421605..8b126f0 100644
--- a/vendor/github.com/google/go-github/github/orgs.go
+++ b/vendor/github.com/google/go-github/github/orgs.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -85,7 +86,7 @@ type OrganizationsListOptions struct {
// as the opts.Since parameter for the next call.
//
// GitHub API docs: https://developer.github.com/v3/orgs/#list-all-organizations
-func (s *OrganizationsService) ListAll(opt *OrganizationsListOptions) ([]*Organization, *Response, error) {
+func (s *OrganizationsService) ListAll(ctx context.Context, opt *OrganizationsListOptions) ([]*Organization, *Response, error) {
u, err := addOptions("organizations", opt)
if err != nil {
return nil, nil, err
@@ -97,7 +98,7 @@ func (s *OrganizationsService) ListAll(opt *OrganizationsListOptions) ([]*Organi
}
orgs := []*Organization{}
- resp, err := s.client.Do(req, &orgs)
+ resp, err := s.client.Do(ctx, req, &orgs)
if err != nil {
return nil, resp, err
}
@@ -108,7 +109,7 @@ func (s *OrganizationsService) ListAll(opt *OrganizationsListOptions) ([]*Organi
// organizations for the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/orgs/#list-user-organizations
-func (s *OrganizationsService) List(user string, opt *ListOptions) ([]*Organization, *Response, error) {
+func (s *OrganizationsService) List(ctx context.Context, user string, opt *ListOptions) ([]*Organization, *Response, error) {
var u string
if user != "" {
u = fmt.Sprintf("users/%v/orgs", user)
@@ -126,7 +127,7 @@ func (s *OrganizationsService) List(user string, opt *ListOptions) ([]*Organizat
}
var orgs []*Organization
- resp, err := s.client.Do(req, &orgs)
+ resp, err := s.client.Do(ctx, req, &orgs)
if err != nil {
return nil, resp, err
}
@@ -137,7 +138,7 @@ func (s *OrganizationsService) List(user string, opt *ListOptions) ([]*Organizat
// Get fetches an organization by name.
//
// GitHub API docs: https://developer.github.com/v3/orgs/#get-an-organization
-func (s *OrganizationsService) Get(org string) (*Organization, *Response, error) {
+func (s *OrganizationsService) Get(ctx context.Context, org string) (*Organization, *Response, error) {
u := fmt.Sprintf("orgs/%v", org)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -145,7 +146,7 @@ func (s *OrganizationsService) Get(org string) (*Organization, *Response, error)
}
organization := new(Organization)
- resp, err := s.client.Do(req, organization)
+ resp, err := s.client.Do(ctx, req, organization)
if err != nil {
return nil, resp, err
}
@@ -156,7 +157,7 @@ func (s *OrganizationsService) Get(org string) (*Organization, *Response, error)
// Edit an organization.
//
// GitHub API docs: https://developer.github.com/v3/orgs/#edit-an-organization
-func (s *OrganizationsService) Edit(name string, org *Organization) (*Organization, *Response, error) {
+func (s *OrganizationsService) Edit(ctx context.Context, name string, org *Organization) (*Organization, *Response, error) {
u := fmt.Sprintf("orgs/%v", name)
req, err := s.client.NewRequest("PATCH", u, org)
if err != nil {
@@ -164,7 +165,7 @@ func (s *OrganizationsService) Edit(name string, org *Organization) (*Organizati
}
o := new(Organization)
- resp, err := s.client.Do(req, o)
+ resp, err := s.client.Do(ctx, req, o)
if err != nil {
return nil, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/orgs_hooks.go b/vendor/github.com/google/go-github/github/orgs_hooks.go
index 6dc1052..4fc692e 100644
--- a/vendor/github.com/google/go-github/github/orgs_hooks.go
+++ b/vendor/github.com/google/go-github/github/orgs_hooks.go
@@ -5,12 +5,15 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// ListHooks lists all Hooks for the specified organization.
//
// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#list-hooks
-func (s *OrganizationsService) ListHooks(org string, opt *ListOptions) ([]*Hook, *Response, error) {
+func (s *OrganizationsService) ListHooks(ctx context.Context, org string, opt *ListOptions) ([]*Hook, *Response, error) {
u := fmt.Sprintf("orgs/%v/hooks", org)
u, err := addOptions(u, opt)
if err != nil {
@@ -23,7 +26,7 @@ func (s *OrganizationsService) ListHooks(org string, opt *ListOptions) ([]*Hook,
}
var hooks []*Hook
- resp, err := s.client.Do(req, &hooks)
+ resp, err := s.client.Do(ctx, req, &hooks)
if err != nil {
return nil, resp, err
}
@@ -34,14 +37,14 @@ func (s *OrganizationsService) ListHooks(org string, opt *ListOptions) ([]*Hook,
// GetHook returns a single specified Hook.
//
// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#get-single-hook
-func (s *OrganizationsService) GetHook(org string, id int) (*Hook, *Response, error) {
+func (s *OrganizationsService) GetHook(ctx context.Context, org string, id int) (*Hook, *Response, error) {
u := fmt.Sprintf("orgs/%v/hooks/%d", org, id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
hook := new(Hook)
- resp, err := s.client.Do(req, hook)
+ resp, err := s.client.Do(ctx, req, hook)
return hook, resp, err
}
@@ -49,7 +52,7 @@ func (s *OrganizationsService) GetHook(org string, id int) (*Hook, *Response, er
// Name and Config are required fields.
//
// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#create-a-hook
-func (s *OrganizationsService) CreateHook(org string, hook *Hook) (*Hook, *Response, error) {
+func (s *OrganizationsService) CreateHook(ctx context.Context, org string, hook *Hook) (*Hook, *Response, error) {
u := fmt.Sprintf("orgs/%v/hooks", org)
req, err := s.client.NewRequest("POST", u, hook)
if err != nil {
@@ -57,7 +60,7 @@ func (s *OrganizationsService) CreateHook(org string, hook *Hook) (*Hook, *Respo
}
h := new(Hook)
- resp, err := s.client.Do(req, h)
+ resp, err := s.client.Do(ctx, req, h)
if err != nil {
return nil, resp, err
}
@@ -68,37 +71,37 @@ func (s *OrganizationsService) CreateHook(org string, hook *Hook) (*Hook, *Respo
// EditHook updates a specified Hook.
//
// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#edit-a-hook
-func (s *OrganizationsService) EditHook(org string, id int, hook *Hook) (*Hook, *Response, error) {
+func (s *OrganizationsService) EditHook(ctx context.Context, org string, id int, hook *Hook) (*Hook, *Response, error) {
u := fmt.Sprintf("orgs/%v/hooks/%d", org, id)
req, err := s.client.NewRequest("PATCH", u, hook)
if err != nil {
return nil, nil, err
}
h := new(Hook)
- resp, err := s.client.Do(req, h)
+ resp, err := s.client.Do(ctx, req, h)
return h, resp, err
}
// PingHook triggers a 'ping' event to be sent to the Hook.
//
// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#ping-a-hook
-func (s *OrganizationsService) PingHook(org string, id int) (*Response, error) {
+func (s *OrganizationsService) PingHook(ctx context.Context, org string, id int) (*Response, error) {
u := fmt.Sprintf("orgs/%v/hooks/%d/pings", org, id)
req, err := s.client.NewRequest("POST", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// DeleteHook deletes a specified Hook.
//
// GitHub API docs: https://developer.github.com/v3/orgs/hooks/#delete-a-hook
-func (s *OrganizationsService) DeleteHook(org string, id int) (*Response, error) {
+func (s *OrganizationsService) DeleteHook(ctx context.Context, org string, id int) (*Response, error) {
u := fmt.Sprintf("orgs/%v/hooks/%d", org, id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/orgs_members.go b/vendor/github.com/google/go-github/github/orgs_members.go
index 40dfc56..f1209c7 100644
--- a/vendor/github.com/google/go-github/github/orgs_members.go
+++ b/vendor/github.com/google/go-github/github/orgs_members.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// Membership represents the status of a user's membership in an organization or team.
type Membership struct {
@@ -69,7 +72,7 @@ type ListMembersOptions struct {
// public members, otherwise it will only return public members.
//
// GitHub API docs: https://developer.github.com/v3/orgs/members/#members-list
-func (s *OrganizationsService) ListMembers(org string, opt *ListMembersOptions) ([]*User, *Response, error) {
+func (s *OrganizationsService) ListMembers(ctx context.Context, org string, opt *ListMembersOptions) ([]*User, *Response, error) {
var u string
if opt != nil && opt.PublicOnly {
u = fmt.Sprintf("orgs/%v/public_members", org)
@@ -87,7 +90,7 @@ func (s *OrganizationsService) ListMembers(org string, opt *ListMembersOptions)
}
var members []*User
- resp, err := s.client.Do(req, &members)
+ resp, err := s.client.Do(ctx, req, &members)
if err != nil {
return nil, resp, err
}
@@ -98,14 +101,14 @@ func (s *OrganizationsService) ListMembers(org string, opt *ListMembersOptions)
// IsMember checks if a user is a member of an organization.
//
// GitHub API docs: https://developer.github.com/v3/orgs/members/#check-membership
-func (s *OrganizationsService) IsMember(org, user string) (bool, *Response, error) {
+func (s *OrganizationsService) IsMember(ctx context.Context, org, user string) (bool, *Response, error) {
u := fmt.Sprintf("orgs/%v/members/%v", org, user)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return false, nil, err
}
- resp, err := s.client.Do(req, nil)
+ resp, err := s.client.Do(ctx, req, nil)
member, err := parseBoolResponse(err)
return member, resp, err
}
@@ -113,14 +116,14 @@ func (s *OrganizationsService) IsMember(org, user string) (bool, *Response, erro
// IsPublicMember checks if a user is a public member of an organization.
//
// GitHub API docs: https://developer.github.com/v3/orgs/members/#check-public-membership
-func (s *OrganizationsService) IsPublicMember(org, user string) (bool, *Response, error) {
+func (s *OrganizationsService) IsPublicMember(ctx context.Context, org, user string) (bool, *Response, error) {
u := fmt.Sprintf("orgs/%v/public_members/%v", org, user)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return false, nil, err
}
- resp, err := s.client.Do(req, nil)
+ resp, err := s.client.Do(ctx, req, nil)
member, err := parseBoolResponse(err)
return member, resp, err
}
@@ -128,41 +131,41 @@ func (s *OrganizationsService) IsPublicMember(org, user string) (bool, *Response
// RemoveMember removes a user from all teams of an organization.
//
// GitHub API docs: https://developer.github.com/v3/orgs/members/#remove-a-member
-func (s *OrganizationsService) RemoveMember(org, user string) (*Response, error) {
+func (s *OrganizationsService) RemoveMember(ctx context.Context, org, user string) (*Response, error) {
u := fmt.Sprintf("orgs/%v/members/%v", org, user)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// PublicizeMembership publicizes a user's membership in an organization. (A
// user cannot publicize the membership for another user.)
//
// GitHub API docs: https://developer.github.com/v3/orgs/members/#publicize-a-users-membership
-func (s *OrganizationsService) PublicizeMembership(org, user string) (*Response, error) {
+func (s *OrganizationsService) PublicizeMembership(ctx context.Context, org, user string) (*Response, error) {
u := fmt.Sprintf("orgs/%v/public_members/%v", org, user)
req, err := s.client.NewRequest("PUT", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// ConcealMembership conceals a user's membership in an organization.
//
// GitHub API docs: https://developer.github.com/v3/orgs/members/#conceal-a-users-membership
-func (s *OrganizationsService) ConcealMembership(org, user string) (*Response, error) {
+func (s *OrganizationsService) ConcealMembership(ctx context.Context, org, user string) (*Response, error) {
u := fmt.Sprintf("orgs/%v/public_members/%v", org, user)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// ListOrgMembershipsOptions specifies optional parameters to the
@@ -178,7 +181,7 @@ type ListOrgMembershipsOptions struct {
// ListOrgMemberships lists the organization memberships for the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/orgs/members/#list-your-organization-memberships
-func (s *OrganizationsService) ListOrgMemberships(opt *ListOrgMembershipsOptions) ([]*Membership, *Response, error) {
+func (s *OrganizationsService) ListOrgMemberships(ctx context.Context, opt *ListOrgMembershipsOptions) ([]*Membership, *Response, error) {
u := "user/memberships/orgs"
u, err := addOptions(u, opt)
if err != nil {
@@ -191,7 +194,7 @@ func (s *OrganizationsService) ListOrgMemberships(opt *ListOrgMembershipsOptions
}
var memberships []*Membership
- resp, err := s.client.Do(req, &memberships)
+ resp, err := s.client.Do(ctx, req, &memberships)
if err != nil {
return nil, resp, err
}
@@ -203,9 +206,10 @@ func (s *OrganizationsService) ListOrgMemberships(opt *ListOrgMembershipsOptions
// Passing an empty string for user will get the membership for the
// authenticated user.
//
-// GitHub API docs: https://developer.github.com/v3/orgs/members/#get-organization-membership
-// GitHub API docs: https://developer.github.com/v3/orgs/members/#get-your-organization-membership
-func (s *OrganizationsService) GetOrgMembership(user, org string) (*Membership, *Response, error) {
+// GitHub API docs:
+// https://developer.github.com/v3/orgs/members/#get-organization-membership
+// https://developer.github.com/v3/orgs/members/#get-your-organization-membership
+func (s *OrganizationsService) GetOrgMembership(ctx context.Context, user, org string) (*Membership, *Response, error) {
var u string
if user != "" {
u = fmt.Sprintf("orgs/%v/memberships/%v", org, user)
@@ -219,7 +223,7 @@ func (s *OrganizationsService) GetOrgMembership(user, org string) (*Membership,
}
membership := new(Membership)
- resp, err := s.client.Do(req, membership)
+ resp, err := s.client.Do(ctx, req, membership)
if err != nil {
return nil, resp, err
}
@@ -233,7 +237,7 @@ func (s *OrganizationsService) GetOrgMembership(user, org string) (*Membership,
//
// GitHub API docs: https://developer.github.com/v3/orgs/members/#add-or-update-organization-membership
// GitHub API docs: https://developer.github.com/v3/orgs/members/#edit-your-organization-membership
-func (s *OrganizationsService) EditOrgMembership(user, org string, membership *Membership) (*Membership, *Response, error) {
+func (s *OrganizationsService) EditOrgMembership(ctx context.Context, user, org string, membership *Membership) (*Membership, *Response, error) {
var u, method string
if user != "" {
u = fmt.Sprintf("orgs/%v/memberships/%v", org, user)
@@ -249,7 +253,7 @@ func (s *OrganizationsService) EditOrgMembership(user, org string, membership *M
}
m := new(Membership)
- resp, err := s.client.Do(req, m)
+ resp, err := s.client.Do(ctx, req, m)
if err != nil {
return nil, resp, err
}
@@ -261,20 +265,20 @@ func (s *OrganizationsService) EditOrgMembership(user, org string, membership *M
// user has been invited to the organization, this will cancel their invitation.
//
// GitHub API docs: https://developer.github.com/v3/orgs/members/#remove-organization-membership
-func (s *OrganizationsService) RemoveOrgMembership(user, org string) (*Response, error) {
+func (s *OrganizationsService) RemoveOrgMembership(ctx context.Context, user, org string) (*Response, error) {
u := fmt.Sprintf("orgs/%v/memberships/%v", org, user)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// ListPendingOrgInvitations returns a list of pending invitations.
//
// GitHub API docs: https://developer.github.com/v3/orgs/members/#list-pending-organization-invitations
-func (s *OrganizationsService) ListPendingOrgInvitations(org int, opt *ListOptions) ([]*Invitation, *Response, error) {
+func (s *OrganizationsService) ListPendingOrgInvitations(ctx context.Context, org int, opt *ListOptions) ([]*Invitation, *Response, error) {
u := fmt.Sprintf("orgs/%v/invitations", org)
u, err := addOptions(u, opt)
if err != nil {
@@ -286,11 +290,8 @@ func (s *OrganizationsService) ListPendingOrgInvitations(org int, opt *ListOptio
return nil, nil, err
}
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeOrgMembershipPreview)
-
var pendingInvitations []*Invitation
- resp, err := s.client.Do(req, &pendingInvitations)
+ resp, err := s.client.Do(ctx, req, &pendingInvitations)
if err != nil {
return nil, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/orgs_outside_collaborators.go b/vendor/github.com/google/go-github/github/orgs_outside_collaborators.go
new file mode 100644
index 0000000..e34f865
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/orgs_outside_collaborators.go
@@ -0,0 +1,50 @@
+// Copyright 2017 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "context"
+ "fmt"
+)
+
+// ListOutsideCollaboratorsOptions specifies optional parameters to the
+// OrganizationsService.ListOutsideCollaborators method.
+type ListOutsideCollaboratorsOptions struct {
+ // Filter outside collaborators returned in the list. Possible values are:
+ // 2fa_disabled, all. Default is "all".
+ Filter string `url:"filter,omitempty"`
+
+ ListOptions
+}
+
+// ListOutsideCollaborators lists outside collaborators of organization's repositories.
+// This will only work if the authenticated
+// user is an owner of the organization.
+//
+// Warning: The API may change without advance notice during the preview period.
+// Preview features are not supported for production use.
+//
+// GitHub API docs: https://developer.github.com/v3/orgs/outside_collaborators/#list-outside-collaborators
+func (s *OrganizationsService) ListOutsideCollaborators(ctx context.Context, org string, opt *ListOutsideCollaboratorsOptions) ([]*User, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/outside_collaborators", org)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var members []*User
+ resp, err := s.client.Do(ctx, req, &members)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return members, resp, nil
+}
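orgs_outside_collaborators.go is new in this update and wraps the outside-collaborators preview endpoint. A hedged usage sketch, with the organization name as a placeholder and ctx/client as before:

opt := &github.ListOutsideCollaboratorsOptions{
	Filter:      "2fa_disabled", // or "all" (the default)
	ListOptions: github.ListOptions{PerPage: 50},
}
collaborators, _, err := client.Organizations.ListOutsideCollaborators(ctx, "example-org", opt)
if err != nil {
	log.Fatal(err)
}
for _, u := range collaborators {
	fmt.Println(u) // User implements Stringer
}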
diff --git a/vendor/github.com/google/go-github/github/orgs_projects.go b/vendor/github.com/google/go-github/github/orgs_projects.go
new file mode 100644
index 0000000..e57cba9
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/orgs_projects.go
@@ -0,0 +1,60 @@
+// Copyright 2017 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "context"
+ "fmt"
+)
+
+// ListProjects lists the projects for an organization.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/#list-organization-projects
+func (s *OrganizationsService) ListProjects(ctx context.Context, org string, opt *ProjectListOptions) ([]*Project, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/projects", org)
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ var projects []*Project
+ resp, err := s.client.Do(ctx, req, &projects)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return projects, resp, nil
+}
+
+// CreateProject creates a GitHub Project for the specified organization.
+//
+// GitHub API docs: https://developer.github.com/v3/projects/#create-an-organization-project
+func (s *OrganizationsService) CreateProject(ctx context.Context, org string, opt *ProjectOptions) (*Project, *Response, error) {
+ u := fmt.Sprintf("orgs/%v/projects", org)
+ req, err := s.client.NewRequest("POST", u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeProjectsPreview)
+
+ project := &Project{}
+ resp, err := s.client.Do(ctx, req, project)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return project, resp, nil
+}
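orgs_projects.go is also new and exposes organization projects behind the projects preview media type. A sketch of listing and creating a project; the organization and project names are placeholders, and ProjectOptions is assumed to carry the new project's name as in the projects API:

projects, _, err := client.Organizations.ListProjects(ctx, "example-org", nil)
if err != nil {
	log.Fatal(err)
}
log.Printf("existing projects: %d", len(projects))

// Assumes ProjectOptions exposes a Name field for the new project.
project, _, err := client.Organizations.CreateProject(ctx, "example-org", &github.ProjectOptions{Name: "Roadmap"})
if err != nil {
	log.Fatal(err)
}
fmt.Println(project) // Project implements Stringer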
diff --git a/vendor/github.com/google/go-github/github/orgs_teams.go b/vendor/github.com/google/go-github/github/orgs_teams.go
index c1818e5..70b090d 100644
--- a/vendor/github.com/google/go-github/github/orgs_teams.go
+++ b/vendor/github.com/google/go-github/github/orgs_teams.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -38,6 +39,10 @@ type Team struct {
Organization *Organization `json:"organization,omitempty"`
MembersURL *string `json:"members_url,omitempty"`
RepositoriesURL *string `json:"repositories_url,omitempty"`
+
+ // LDAPDN is only available in GitHub Enterprise and when the team
+ // membership is synchronized with LDAP.
+ LDAPDN *string `json:"ldap_dn,omitempty"`
}
func (t Team) String() string {
@@ -62,7 +67,7 @@ func (i Invitation) String() string {
// ListTeams lists all of the teams for an organization.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#list-teams
-func (s *OrganizationsService) ListTeams(org string, opt *ListOptions) ([]*Team, *Response, error) {
+func (s *OrganizationsService) ListTeams(ctx context.Context, org string, opt *ListOptions) ([]*Team, *Response, error) {
u := fmt.Sprintf("orgs/%v/teams", org)
u, err := addOptions(u, opt)
if err != nil {
@@ -75,7 +80,7 @@ func (s *OrganizationsService) ListTeams(org string, opt *ListOptions) ([]*Team,
}
var teams []*Team
- resp, err := s.client.Do(req, &teams)
+ resp, err := s.client.Do(ctx, req, &teams)
if err != nil {
return nil, resp, err
}
@@ -86,7 +91,7 @@ func (s *OrganizationsService) ListTeams(org string, opt *ListOptions) ([]*Team,
// GetTeam fetches a team by ID.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#get-team
-func (s *OrganizationsService) GetTeam(team int) (*Team, *Response, error) {
+func (s *OrganizationsService) GetTeam(ctx context.Context, team int) (*Team, *Response, error) {
u := fmt.Sprintf("teams/%v", team)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -94,7 +99,7 @@ func (s *OrganizationsService) GetTeam(team int) (*Team, *Response, error) {
}
t := new(Team)
- resp, err := s.client.Do(req, t)
+ resp, err := s.client.Do(ctx, req, t)
if err != nil {
return nil, resp, err
}
@@ -105,7 +110,7 @@ func (s *OrganizationsService) GetTeam(team int) (*Team, *Response, error) {
// CreateTeam creates a new team within an organization.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#create-team
-func (s *OrganizationsService) CreateTeam(org string, team *Team) (*Team, *Response, error) {
+func (s *OrganizationsService) CreateTeam(ctx context.Context, org string, team *Team) (*Team, *Response, error) {
u := fmt.Sprintf("orgs/%v/teams", org)
req, err := s.client.NewRequest("POST", u, team)
if err != nil {
@@ -113,7 +118,7 @@ func (s *OrganizationsService) CreateTeam(org string, team *Team) (*Team, *Respo
}
t := new(Team)
- resp, err := s.client.Do(req, t)
+ resp, err := s.client.Do(ctx, req, t)
if err != nil {
return nil, resp, err
}
@@ -124,7 +129,7 @@ func (s *OrganizationsService) CreateTeam(org string, team *Team) (*Team, *Respo
// EditTeam edits a team.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#edit-team
-func (s *OrganizationsService) EditTeam(id int, team *Team) (*Team, *Response, error) {
+func (s *OrganizationsService) EditTeam(ctx context.Context, id int, team *Team) (*Team, *Response, error) {
u := fmt.Sprintf("teams/%v", id)
req, err := s.client.NewRequest("PATCH", u, team)
if err != nil {
@@ -132,7 +137,7 @@ func (s *OrganizationsService) EditTeam(id int, team *Team) (*Team, *Response, e
}
t := new(Team)
- resp, err := s.client.Do(req, t)
+ resp, err := s.client.Do(ctx, req, t)
if err != nil {
return nil, resp, err
}
@@ -143,14 +148,14 @@ func (s *OrganizationsService) EditTeam(id int, team *Team) (*Team, *Response, e
// DeleteTeam deletes a team.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#delete-team
-func (s *OrganizationsService) DeleteTeam(team int) (*Response, error) {
+func (s *OrganizationsService) DeleteTeam(ctx context.Context, team int) (*Response, error) {
u := fmt.Sprintf("teams/%v", team)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// OrganizationListTeamMembersOptions specifies the optional parameters to the
@@ -167,7 +172,7 @@ type OrganizationListTeamMembersOptions struct {
// team.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#list-team-members
-func (s *OrganizationsService) ListTeamMembers(team int, opt *OrganizationListTeamMembersOptions) ([]*User, *Response, error) {
+func (s *OrganizationsService) ListTeamMembers(ctx context.Context, team int, opt *OrganizationListTeamMembersOptions) ([]*User, *Response, error) {
u := fmt.Sprintf("teams/%v/members", team)
u, err := addOptions(u, opt)
if err != nil {
@@ -180,7 +185,7 @@ func (s *OrganizationsService) ListTeamMembers(team int, opt *OrganizationListTe
}
var members []*User
- resp, err := s.client.Do(req, &members)
+ resp, err := s.client.Do(ctx, req, &members)
if err != nil {
return nil, resp, err
}
@@ -191,14 +196,14 @@ func (s *OrganizationsService) ListTeamMembers(team int, opt *OrganizationListTe
// IsTeamMember checks if a user is a member of the specified team.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#get-team-member
-func (s *OrganizationsService) IsTeamMember(team int, user string) (bool, *Response, error) {
+func (s *OrganizationsService) IsTeamMember(ctx context.Context, team int, user string) (bool, *Response, error) {
u := fmt.Sprintf("teams/%v/members/%v", team, user)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return false, nil, err
}
- resp, err := s.client.Do(req, nil)
+ resp, err := s.client.Do(ctx, req, nil)
member, err := parseBoolResponse(err)
return member, resp, err
}
@@ -206,7 +211,7 @@ func (s *OrganizationsService) IsTeamMember(team int, user string) (bool, *Respo
// ListTeamRepos lists the repositories that the specified team has access to.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#list-team-repos
-func (s *OrganizationsService) ListTeamRepos(team int, opt *ListOptions) ([]*Repository, *Response, error) {
+func (s *OrganizationsService) ListTeamRepos(ctx context.Context, team int, opt *ListOptions) ([]*Repository, *Response, error) {
u := fmt.Sprintf("teams/%v/repos", team)
u, err := addOptions(u, opt)
if err != nil {
@@ -219,7 +224,7 @@ func (s *OrganizationsService) ListTeamRepos(team int, opt *ListOptions) ([]*Rep
}
var repos []*Repository
- resp, err := s.client.Do(req, &repos)
+ resp, err := s.client.Do(ctx, req, &repos)
if err != nil {
return nil, resp, err
}
@@ -232,7 +237,7 @@ func (s *OrganizationsService) ListTeamRepos(team int, opt *ListOptions) ([]*Rep
// permissions team has for that repo.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#check-if-a-team-manages-a-repository
-func (s *OrganizationsService) IsTeamRepo(team int, owner string, repo string) (*Repository, *Response, error) {
+func (s *OrganizationsService) IsTeamRepo(ctx context.Context, team int, owner string, repo string) (*Repository, *Response, error) {
u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -242,7 +247,7 @@ func (s *OrganizationsService) IsTeamRepo(team int, owner string, repo string) (
req.Header.Set("Accept", mediaTypeOrgPermissionRepo)
repository := new(Repository)
- resp, err := s.client.Do(req, repository)
+ resp, err := s.client.Do(ctx, req, repository)
if err != nil {
return nil, resp, err
}
@@ -268,14 +273,14 @@ type OrganizationAddTeamRepoOptions struct {
// belongs, or a direct fork of a repository owned by the organization.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#add-team-repo
-func (s *OrganizationsService) AddTeamRepo(team int, owner string, repo string, opt *OrganizationAddTeamRepoOptions) (*Response, error) {
+func (s *OrganizationsService) AddTeamRepo(ctx context.Context, team int, owner string, repo string, opt *OrganizationAddTeamRepoOptions) (*Response, error) {
u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo)
req, err := s.client.NewRequest("PUT", u, opt)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// RemoveTeamRepo removes a repository from being managed by the specified
@@ -283,19 +288,19 @@ func (s *OrganizationsService) AddTeamRepo(team int, owner string, repo string,
// from the team.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#remove-team-repo
-func (s *OrganizationsService) RemoveTeamRepo(team int, owner string, repo string) (*Response, error) {
+func (s *OrganizationsService) RemoveTeamRepo(ctx context.Context, team int, owner string, repo string) (*Response, error) {
u := fmt.Sprintf("teams/%v/repos/%v/%v", team, owner, repo)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// ListUserTeams lists a user's teams
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#list-user-teams
-func (s *OrganizationsService) ListUserTeams(opt *ListOptions) ([]*Team, *Response, error) {
+func (s *OrganizationsService) ListUserTeams(ctx context.Context, opt *ListOptions) ([]*Team, *Response, error) {
u := "user/teams"
u, err := addOptions(u, opt)
if err != nil {
@@ -308,7 +313,7 @@ func (s *OrganizationsService) ListUserTeams(opt *ListOptions) ([]*Team, *Respon
}
var teams []*Team
- resp, err := s.client.Do(req, &teams)
+ resp, err := s.client.Do(ctx, req, &teams)
if err != nil {
return nil, resp, err
}
@@ -319,7 +324,7 @@ func (s *OrganizationsService) ListUserTeams(opt *ListOptions) ([]*Team, *Respon
// GetTeamMembership returns the membership status for a user in a team.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#get-team-membership
-func (s *OrganizationsService) GetTeamMembership(team int, user string) (*Membership, *Response, error) {
+func (s *OrganizationsService) GetTeamMembership(ctx context.Context, team int, user string) (*Membership, *Response, error) {
u := fmt.Sprintf("teams/%v/memberships/%v", team, user)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -327,7 +332,7 @@ func (s *OrganizationsService) GetTeamMembership(team int, user string) (*Member
}
t := new(Membership)
- resp, err := s.client.Do(req, t)
+ resp, err := s.client.Do(ctx, req, t)
if err != nil {
return nil, resp, err
}
@@ -367,7 +372,7 @@ type OrganizationAddTeamMembershipOptions struct {
// added as a member of the team.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#add-team-membership
-func (s *OrganizationsService) AddTeamMembership(team int, user string, opt *OrganizationAddTeamMembershipOptions) (*Membership, *Response, error) {
+func (s *OrganizationsService) AddTeamMembership(ctx context.Context, team int, user string, opt *OrganizationAddTeamMembershipOptions) (*Membership, *Response, error) {
u := fmt.Sprintf("teams/%v/memberships/%v", team, user)
req, err := s.client.NewRequest("PUT", u, opt)
if err != nil {
@@ -375,7 +380,7 @@ func (s *OrganizationsService) AddTeamMembership(team int, user string, opt *Org
}
t := new(Membership)
- resp, err := s.client.Do(req, t)
+ resp, err := s.client.Do(ctx, req, t)
if err != nil {
return nil, resp, err
}
@@ -386,14 +391,14 @@ func (s *OrganizationsService) AddTeamMembership(team int, user string, opt *Org
// RemoveTeamMembership removes a user from a team.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#remove-team-membership
-func (s *OrganizationsService) RemoveTeamMembership(team int, user string) (*Response, error) {
+func (s *OrganizationsService) RemoveTeamMembership(ctx context.Context, team int, user string) (*Response, error) {
u := fmt.Sprintf("teams/%v/memberships/%v", team, user)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// ListPendingTeamInvitations gets the pending invitation list for a team.
@@ -401,7 +406,7 @@ func (s *OrganizationsService) RemoveTeamMembership(team int, user string) (*Res
// Preview features are not supported for production use.
//
// GitHub API docs: https://developer.github.com/v3/orgs/teams/#list-pending-team-invitations
-func (s *OrganizationsService) ListPendingTeamInvitations(team int, opt *ListOptions) ([]*Invitation, *Response, error) {
+func (s *OrganizationsService) ListPendingTeamInvitations(ctx context.Context, team int, opt *ListOptions) ([]*Invitation, *Response, error) {
u := fmt.Sprintf("teams/%v/invitations", team)
u, err := addOptions(u, opt)
if err != nil {
@@ -413,11 +418,8 @@ func (s *OrganizationsService) ListPendingTeamInvitations(team int, opt *ListOpt
return nil, nil, err
}
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeOrgMembershipPreview)
-
var pendingInvitations []*Invitation
- resp, err := s.client.Do(req, &pendingInvitations)
+ resp, err := s.client.Do(ctx, req, &pendingInvitations)
if err != nil {
return nil, resp, err
}
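
A minimal sketch of driving the updated ListTeams signature, which now takes the caller's context as its first argument; the helper name and page size are illustrative assumptions:

// listAllTeams is an illustrative helper; it assumes imports of "context"
// and "github.com/google/go-github/github".
func listAllTeams(ctx context.Context, client *github.Client, org string) ([]*github.Team, error) {
	var all []*github.Team
	opt := &github.ListOptions{PerPage: 50}
	for {
		teams, resp, err := client.Organizations.ListTeams(ctx, org, opt)
		if err != nil {
			return nil, err
		}
		all = append(all, teams...)
		if resp.NextPage == 0 {
			return all, nil
		}
		opt.Page = resp.NextPage
	}
}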
diff --git a/vendor/github.com/google/go-github/github/projects.go b/vendor/github.com/google/go-github/github/projects.go
index 766f002..58b638e 100644
--- a/vendor/github.com/google/go-github/github/projects.go
+++ b/vendor/github.com/google/go-github/github/projects.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// ProjectsService provides access to the projects functions in the
// GitHub API.
@@ -35,7 +38,7 @@ func (p Project) String() string {
// GetProject gets a GitHub Project for a repo.
//
// GitHub API docs: https://developer.github.com/v3/projects/#get-a-project
-func (s *ProjectsService) GetProject(id int) (*Project, *Response, error) {
+func (s *ProjectsService) GetProject(ctx context.Context, id int) (*Project, *Response, error) {
u := fmt.Sprintf("projects/%v", id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -46,7 +49,7 @@ func (s *ProjectsService) GetProject(id int) (*Project, *Response, error) {
req.Header.Set("Accept", mediaTypeProjectsPreview)
project := &Project{}
- resp, err := s.client.Do(req, project)
+ resp, err := s.client.Do(ctx, req, project)
if err != nil {
return nil, resp, err
}
@@ -67,7 +70,7 @@ type ProjectOptions struct {
// UpdateProject updates a repository project.
//
// GitHub API docs: https://developer.github.com/v3/projects/#update-a-project
-func (s *ProjectsService) UpdateProject(id int, opt *ProjectOptions) (*Project, *Response, error) {
+func (s *ProjectsService) UpdateProject(ctx context.Context, id int, opt *ProjectOptions) (*Project, *Response, error) {
u := fmt.Sprintf("projects/%v", id)
req, err := s.client.NewRequest("PATCH", u, opt)
if err != nil {
@@ -78,7 +81,7 @@ func (s *ProjectsService) UpdateProject(id int, opt *ProjectOptions) (*Project,
req.Header.Set("Accept", mediaTypeProjectsPreview)
project := &Project{}
- resp, err := s.client.Do(req, project)
+ resp, err := s.client.Do(ctx, req, project)
if err != nil {
return nil, resp, err
}
@@ -89,7 +92,7 @@ func (s *ProjectsService) UpdateProject(id int, opt *ProjectOptions) (*Project,
// DeleteProject deletes a GitHub Project from a repository.
//
// GitHub API docs: https://developer.github.com/v3/projects/#delete-a-project
-func (s *ProjectsService) DeleteProject(id int) (*Response, error) {
+func (s *ProjectsService) DeleteProject(ctx context.Context, id int) (*Response, error) {
u := fmt.Sprintf("projects/%v", id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
@@ -99,7 +102,7 @@ func (s *ProjectsService) DeleteProject(id int) (*Response, error) {
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeProjectsPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// ProjectColumn represents a column of a GitHub Project.
@@ -116,7 +119,7 @@ type ProjectColumn struct {
// ListProjectColumns lists the columns of a GitHub Project for a repo.
//
// GitHub API docs: https://developer.github.com/v3/projects/columns/#list-project-columns
-func (s *ProjectsService) ListProjectColumns(projectID int, opt *ListOptions) ([]*ProjectColumn, *Response, error) {
+func (s *ProjectsService) ListProjectColumns(ctx context.Context, projectID int, opt *ListOptions) ([]*ProjectColumn, *Response, error) {
u := fmt.Sprintf("projects/%v/columns", projectID)
u, err := addOptions(u, opt)
if err != nil {
@@ -132,7 +135,7 @@ func (s *ProjectsService) ListProjectColumns(projectID int, opt *ListOptions) ([
req.Header.Set("Accept", mediaTypeProjectsPreview)
columns := []*ProjectColumn{}
- resp, err := s.client.Do(req, &columns)
+ resp, err := s.client.Do(ctx, req, &columns)
if err != nil {
return nil, resp, err
}
@@ -143,7 +146,7 @@ func (s *ProjectsService) ListProjectColumns(projectID int, opt *ListOptions) ([
// GetProjectColumn gets a column of a GitHub Project for a repo.
//
// GitHub API docs: https://developer.github.com/v3/projects/columns/#get-a-project-column
-func (s *ProjectsService) GetProjectColumn(id int) (*ProjectColumn, *Response, error) {
+func (s *ProjectsService) GetProjectColumn(ctx context.Context, id int) (*ProjectColumn, *Response, error) {
u := fmt.Sprintf("projects/columns/%v", id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -154,7 +157,7 @@ func (s *ProjectsService) GetProjectColumn(id int) (*ProjectColumn, *Response, e
req.Header.Set("Accept", mediaTypeProjectsPreview)
column := &ProjectColumn{}
- resp, err := s.client.Do(req, column)
+ resp, err := s.client.Do(ctx, req, column)
if err != nil {
return nil, resp, err
}
@@ -173,7 +176,7 @@ type ProjectColumnOptions struct {
// CreateProjectColumn creates a column for the specified (by number) project.
//
// GitHub API docs: https://developer.github.com/v3/projects/columns/#create-a-project-column
-func (s *ProjectsService) CreateProjectColumn(projectID int, opt *ProjectColumnOptions) (*ProjectColumn, *Response, error) {
+func (s *ProjectsService) CreateProjectColumn(ctx context.Context, projectID int, opt *ProjectColumnOptions) (*ProjectColumn, *Response, error) {
u := fmt.Sprintf("projects/%v/columns", projectID)
req, err := s.client.NewRequest("POST", u, opt)
if err != nil {
@@ -184,7 +187,7 @@ func (s *ProjectsService) CreateProjectColumn(projectID int, opt *ProjectColumnO
req.Header.Set("Accept", mediaTypeProjectsPreview)
column := &ProjectColumn{}
- resp, err := s.client.Do(req, column)
+ resp, err := s.client.Do(ctx, req, column)
if err != nil {
return nil, resp, err
}
@@ -195,7 +198,7 @@ func (s *ProjectsService) CreateProjectColumn(projectID int, opt *ProjectColumnO
// UpdateProjectColumn updates a column of a GitHub Project.
//
// GitHub API docs: https://developer.github.com/v3/projects/columns/#update-a-project-column
-func (s *ProjectsService) UpdateProjectColumn(columnID int, opt *ProjectColumnOptions) (*ProjectColumn, *Response, error) {
+func (s *ProjectsService) UpdateProjectColumn(ctx context.Context, columnID int, opt *ProjectColumnOptions) (*ProjectColumn, *Response, error) {
u := fmt.Sprintf("projects/columns/%v", columnID)
req, err := s.client.NewRequest("PATCH", u, opt)
if err != nil {
@@ -206,7 +209,7 @@ func (s *ProjectsService) UpdateProjectColumn(columnID int, opt *ProjectColumnOp
req.Header.Set("Accept", mediaTypeProjectsPreview)
column := &ProjectColumn{}
- resp, err := s.client.Do(req, column)
+ resp, err := s.client.Do(ctx, req, column)
if err != nil {
return nil, resp, err
}
@@ -217,7 +220,7 @@ func (s *ProjectsService) UpdateProjectColumn(columnID int, opt *ProjectColumnOp
// DeleteProjectColumn deletes a column from a GitHub Project.
//
// GitHub API docs: https://developer.github.com/v3/projects/columns/#delete-a-project-column
-func (s *ProjectsService) DeleteProjectColumn(columnID int) (*Response, error) {
+func (s *ProjectsService) DeleteProjectColumn(ctx context.Context, columnID int) (*Response, error) {
u := fmt.Sprintf("projects/columns/%v", columnID)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
@@ -227,7 +230,7 @@ func (s *ProjectsService) DeleteProjectColumn(columnID int) (*Response, error) {
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeProjectsPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// ProjectColumnMoveOptions specifies the parameters to the
@@ -241,7 +244,7 @@ type ProjectColumnMoveOptions struct {
// MoveProjectColumn moves a column within a GitHub Project.
//
// GitHub API docs: https://developer.github.com/v3/projects/columns/#move-a-project-column
-func (s *ProjectsService) MoveProjectColumn(columnID int, opt *ProjectColumnMoveOptions) (*Response, error) {
+func (s *ProjectsService) MoveProjectColumn(ctx context.Context, columnID int, opt *ProjectColumnMoveOptions) (*Response, error) {
u := fmt.Sprintf("projects/columns/%v/moves", columnID)
req, err := s.client.NewRequest("POST", u, opt)
if err != nil {
@@ -251,7 +254,7 @@ func (s *ProjectsService) MoveProjectColumn(columnID int, opt *ProjectColumnMove
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeProjectsPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// ProjectCard represents a card in a column of a GitHub Project.
@@ -269,7 +272,7 @@ type ProjectCard struct {
// ListProjectCards lists the cards in a column of a GitHub Project.
//
// GitHub API docs: https://developer.github.com/v3/projects/cards/#list-project-cards
-func (s *ProjectsService) ListProjectCards(columnID int, opt *ListOptions) ([]*ProjectCard, *Response, error) {
+func (s *ProjectsService) ListProjectCards(ctx context.Context, columnID int, opt *ListOptions) ([]*ProjectCard, *Response, error) {
u := fmt.Sprintf("projects/columns/%v/cards", columnID)
u, err := addOptions(u, opt)
if err != nil {
@@ -285,7 +288,7 @@ func (s *ProjectsService) ListProjectCards(columnID int, opt *ListOptions) ([]*P
req.Header.Set("Accept", mediaTypeProjectsPreview)
cards := []*ProjectCard{}
- resp, err := s.client.Do(req, &cards)
+ resp, err := s.client.Do(ctx, req, &cards)
if err != nil {
return nil, resp, err
}
@@ -296,7 +299,7 @@ func (s *ProjectsService) ListProjectCards(columnID int, opt *ListOptions) ([]*P
// GetProjectCard gets a card in a column of a GitHub Project.
//
// GitHub API docs: https://developer.github.com/v3/projects/cards/#get-a-project-card
-func (s *ProjectsService) GetProjectCard(columnID int) (*ProjectCard, *Response, error) {
+func (s *ProjectsService) GetProjectCard(ctx context.Context, columnID int) (*ProjectCard, *Response, error) {
u := fmt.Sprintf("projects/columns/cards/%v", columnID)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -307,7 +310,7 @@ func (s *ProjectsService) GetProjectCard(columnID int) (*ProjectCard, *Response,
req.Header.Set("Accept", mediaTypeProjectsPreview)
card := &ProjectCard{}
- resp, err := s.client.Do(req, card)
+ resp, err := s.client.Do(ctx, req, card)
if err != nil {
return nil, resp, err
}
@@ -331,7 +334,7 @@ type ProjectCardOptions struct {
// CreateProjectCard creates a card in the specified column of a GitHub Project.
//
// GitHub API docs: https://developer.github.com/v3/projects/cards/#create-a-project-card
-func (s *ProjectsService) CreateProjectCard(columnID int, opt *ProjectCardOptions) (*ProjectCard, *Response, error) {
+func (s *ProjectsService) CreateProjectCard(ctx context.Context, columnID int, opt *ProjectCardOptions) (*ProjectCard, *Response, error) {
u := fmt.Sprintf("projects/columns/%v/cards", columnID)
req, err := s.client.NewRequest("POST", u, opt)
if err != nil {
@@ -342,7 +345,7 @@ func (s *ProjectsService) CreateProjectCard(columnID int, opt *ProjectCardOption
req.Header.Set("Accept", mediaTypeProjectsPreview)
card := &ProjectCard{}
- resp, err := s.client.Do(req, card)
+ resp, err := s.client.Do(ctx, req, card)
if err != nil {
return nil, resp, err
}
@@ -353,7 +356,7 @@ func (s *ProjectsService) CreateProjectCard(columnID int, opt *ProjectCardOption
// UpdateProjectCard updates a card of a GitHub Project.
//
// GitHub API docs: https://developer.github.com/v3/projects/cards/#update-a-project-card
-func (s *ProjectsService) UpdateProjectCard(cardID int, opt *ProjectCardOptions) (*ProjectCard, *Response, error) {
+func (s *ProjectsService) UpdateProjectCard(ctx context.Context, cardID int, opt *ProjectCardOptions) (*ProjectCard, *Response, error) {
u := fmt.Sprintf("projects/columns/cards/%v", cardID)
req, err := s.client.NewRequest("PATCH", u, opt)
if err != nil {
@@ -364,7 +367,7 @@ func (s *ProjectsService) UpdateProjectCard(cardID int, opt *ProjectCardOptions)
req.Header.Set("Accept", mediaTypeProjectsPreview)
card := &ProjectCard{}
- resp, err := s.client.Do(req, card)
+ resp, err := s.client.Do(ctx, req, card)
if err != nil {
return nil, resp, err
}
@@ -375,7 +378,7 @@ func (s *ProjectsService) UpdateProjectCard(cardID int, opt *ProjectCardOptions)
// DeleteProjectCard deletes a card from a GitHub Project.
//
// GitHub API docs: https://developer.github.com/v3/projects/cards/#delete-a-project-card
-func (s *ProjectsService) DeleteProjectCard(cardID int) (*Response, error) {
+func (s *ProjectsService) DeleteProjectCard(ctx context.Context, cardID int) (*Response, error) {
u := fmt.Sprintf("projects/columns/cards/%v", cardID)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
@@ -385,7 +388,7 @@ func (s *ProjectsService) DeleteProjectCard(cardID int) (*Response, error) {
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeProjectsPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// ProjectCardMoveOptions specifies the parameters to the
@@ -403,7 +406,7 @@ type ProjectCardMoveOptions struct {
// MoveProjectCard moves a card within a GitHub Project.
//
// GitHub API docs: https://developer.github.com/v3/projects/cards/#move-a-project-card
-func (s *ProjectsService) MoveProjectCard(cardID int, opt *ProjectCardMoveOptions) (*Response, error) {
+func (s *ProjectsService) MoveProjectCard(ctx context.Context, cardID int, opt *ProjectCardMoveOptions) (*Response, error) {
u := fmt.Sprintf("projects/columns/cards/%v/moves", cardID)
req, err := s.client.NewRequest("POST", u, opt)
if err != nil {
@@ -413,5 +416,5 @@ func (s *ProjectsService) MoveProjectCard(cardID int, opt *ProjectCardMoveOption
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeProjectsPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
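
The projects methods likewise thread the context through to client.Do, while the preview Accept header is still set inside each method, so callers only supply the context. A small illustrative helper under those assumptions (the function name is hypothetical, and ProjectColumn exposing a Name pointer field is assumed from this vendored version; imports of "context", "fmt", and go-github are assumed):

// dumpProjectColumns is illustrative; projectID would come from a prior
// project lookup. A nil ListOptions fetches only the first page.
func dumpProjectColumns(ctx context.Context, client *github.Client, projectID int) error {
	columns, _, err := client.Projects.ListProjectColumns(ctx, projectID, nil)
	if err != nil {
		return err
	}
	for _, c := range columns {
		if c.Name != nil {
			fmt.Println("column:", *c.Name)
		}
	}
	return nil
}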
diff --git a/vendor/github.com/google/go-github/github/pulls.go b/vendor/github.com/google/go-github/github/pulls.go
index 5b5667b..0fdb4cd 100644
--- a/vendor/github.com/google/go-github/github/pulls.go
+++ b/vendor/github.com/google/go-github/github/pulls.go
@@ -7,6 +7,7 @@ package github
import (
"bytes"
+ "context"
"fmt"
"time"
)
@@ -19,35 +20,36 @@ type PullRequestsService service
// PullRequest represents a GitHub pull request on a repository.
type PullRequest struct {
- ID *int `json:"id,omitempty"`
- Number *int `json:"number,omitempty"`
- State *string `json:"state,omitempty"`
- Title *string `json:"title,omitempty"`
- Body *string `json:"body,omitempty"`
- CreatedAt *time.Time `json:"created_at,omitempty"`
- UpdatedAt *time.Time `json:"updated_at,omitempty"`
- ClosedAt *time.Time `json:"closed_at,omitempty"`
- MergedAt *time.Time `json:"merged_at,omitempty"`
- User *User `json:"user,omitempty"`
- Merged *bool `json:"merged,omitempty"`
- Mergeable *bool `json:"mergeable,omitempty"`
- MergedBy *User `json:"merged_by,omitempty"`
- Comments *int `json:"comments,omitempty"`
- Commits *int `json:"commits,omitempty"`
- Additions *int `json:"additions,omitempty"`
- Deletions *int `json:"deletions,omitempty"`
- ChangedFiles *int `json:"changed_files,omitempty"`
- URL *string `json:"url,omitempty"`
- HTMLURL *string `json:"html_url,omitempty"`
- IssueURL *string `json:"issue_url,omitempty"`
- StatusesURL *string `json:"statuses_url,omitempty"`
- DiffURL *string `json:"diff_url,omitempty"`
- PatchURL *string `json:"patch_url,omitempty"`
- ReviewCommentsURL *string `json:"review_comments_url,omitempty"`
- ReviewCommentURL *string `json:"review_comment_url,omitempty"`
- Assignee *User `json:"assignee,omitempty"`
- Assignees []*User `json:"assignees,omitempty"`
- Milestone *Milestone `json:"milestone,omitempty"`
+ ID *int `json:"id,omitempty"`
+ Number *int `json:"number,omitempty"`
+ State *string `json:"state,omitempty"`
+ Title *string `json:"title,omitempty"`
+ Body *string `json:"body,omitempty"`
+ CreatedAt *time.Time `json:"created_at,omitempty"`
+ UpdatedAt *time.Time `json:"updated_at,omitempty"`
+ ClosedAt *time.Time `json:"closed_at,omitempty"`
+ MergedAt *time.Time `json:"merged_at,omitempty"`
+ User *User `json:"user,omitempty"`
+ Merged *bool `json:"merged,omitempty"`
+ Mergeable *bool `json:"mergeable,omitempty"`
+ MergedBy *User `json:"merged_by,omitempty"`
+ Comments *int `json:"comments,omitempty"`
+ Commits *int `json:"commits,omitempty"`
+ Additions *int `json:"additions,omitempty"`
+ Deletions *int `json:"deletions,omitempty"`
+ ChangedFiles *int `json:"changed_files,omitempty"`
+ URL *string `json:"url,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ IssueURL *string `json:"issue_url,omitempty"`
+ StatusesURL *string `json:"statuses_url,omitempty"`
+ DiffURL *string `json:"diff_url,omitempty"`
+ PatchURL *string `json:"patch_url,omitempty"`
+ ReviewCommentsURL *string `json:"review_comments_url,omitempty"`
+ ReviewCommentURL *string `json:"review_comment_url,omitempty"`
+ Assignee *User `json:"assignee,omitempty"`
+ Assignees []*User `json:"assignees,omitempty"`
+ Milestone *Milestone `json:"milestone,omitempty"`
+ MaintainerCanModify *bool `json:"maintainer_can_modify,omitempty"`
Head *PullRequestBranch `json:"head,omitempty"`
Base *PullRequestBranch `json:"base,omitempty"`
@@ -95,7 +97,7 @@ type PullRequestListOptions struct {
// List the pull requests for the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/pulls/#list-pull-requests
-func (s *PullRequestsService) List(owner string, repo string, opt *PullRequestListOptions) ([]*PullRequest, *Response, error) {
+func (s *PullRequestsService) List(ctx context.Context, owner string, repo string, opt *PullRequestListOptions) ([]*PullRequest, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -108,7 +110,7 @@ func (s *PullRequestsService) List(owner string, repo string, opt *PullRequestLi
}
var pulls []*PullRequest
- resp, err := s.client.Do(req, &pulls)
+ resp, err := s.client.Do(ctx, req, &pulls)
if err != nil {
return nil, resp, err
}
@@ -119,7 +121,7 @@ func (s *PullRequestsService) List(owner string, repo string, opt *PullRequestLi
// Get a single pull request.
//
// GitHub API docs: https://developer.github.com/v3/pulls/#get-a-single-pull-request
-func (s *PullRequestsService) Get(owner string, repo string, number int) (*PullRequest, *Response, error) {
+func (s *PullRequestsService) Get(ctx context.Context, owner string, repo string, number int) (*PullRequest, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -127,7 +129,7 @@ func (s *PullRequestsService) Get(owner string, repo string, number int) (*PullR
}
pull := new(PullRequest)
- resp, err := s.client.Do(req, pull)
+ resp, err := s.client.Do(ctx, req, pull)
if err != nil {
return nil, resp, err
}
@@ -136,7 +138,7 @@ func (s *PullRequestsService) Get(owner string, repo string, number int) (*PullR
}
// GetRaw gets raw (diff or patch) format of a pull request.
-func (s *PullRequestsService) GetRaw(owner string, repo string, number int, opt RawOptions) (string, *Response, error) {
+func (s *PullRequestsService) GetRaw(ctx context.Context, owner string, repo string, number int, opt RawOptions) (string, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -153,7 +155,7 @@ func (s *PullRequestsService) GetRaw(owner string, repo string, number int, opt
}
ret := new(bytes.Buffer)
- resp, err := s.client.Do(req, ret)
+ resp, err := s.client.Do(ctx, req, ret)
if err != nil {
return "", resp, err
}
@@ -163,17 +165,18 @@ func (s *PullRequestsService) GetRaw(owner string, repo string, number int, opt
// NewPullRequest represents a new pull request to be created.
type NewPullRequest struct {
- Title *string `json:"title,omitempty"`
- Head *string `json:"head,omitempty"`
- Base *string `json:"base,omitempty"`
- Body *string `json:"body,omitempty"`
- Issue *int `json:"issue,omitempty"`
+ Title *string `json:"title,omitempty"`
+ Head *string `json:"head,omitempty"`
+ Base *string `json:"base,omitempty"`
+ Body *string `json:"body,omitempty"`
+ Issue *int `json:"issue,omitempty"`
+ MaintainerCanModify *bool `json:"maintainer_can_modify,omitempty"`
}
// Create a new pull request on the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/pulls/#create-a-pull-request
-func (s *PullRequestsService) Create(owner string, repo string, pull *NewPullRequest) (*PullRequest, *Response, error) {
+func (s *PullRequestsService) Create(ctx context.Context, owner string, repo string, pull *NewPullRequest) (*PullRequest, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls", owner, repo)
req, err := s.client.NewRequest("POST", u, pull)
if err != nil {
@@ -181,7 +184,7 @@ func (s *PullRequestsService) Create(owner string, repo string, pull *NewPullReq
}
p := new(PullRequest)
- resp, err := s.client.Do(req, p)
+ resp, err := s.client.Do(ctx, req, p)
if err != nil {
return nil, resp, err
}
@@ -190,29 +193,35 @@ func (s *PullRequestsService) Create(owner string, repo string, pull *NewPullReq
}
type pullRequestUpdate struct {
- Title *string `json:"title,omitempty"`
- Body *string `json:"body,omitempty"`
- State *string `json:"state,omitempty"`
- Base *string `json:"base,omitempty"`
+ Title *string `json:"title,omitempty"`
+ Body *string `json:"body,omitempty"`
+ State *string `json:"state,omitempty"`
+ Base *string `json:"base,omitempty"`
+ MaintainerCanModify *bool `json:"maintainer_can_modify,omitempty"`
}
// Edit a pull request.
+// pull must not be nil.
//
-// The following fields are editable: Title, Body, State, and Base.Ref.
+// The following fields are editable: Title, Body, State, Base.Ref, and MaintainerCanModify.
// Base.Ref updates the base branch of the pull request.
//
// GitHub API docs: https://developer.github.com/v3/pulls/#update-a-pull-request
-func (s *PullRequestsService) Edit(owner string, repo string, number int, pull *PullRequest) (*PullRequest, *Response, error) {
+func (s *PullRequestsService) Edit(ctx context.Context, owner string, repo string, number int, pull *PullRequest) (*PullRequest, *Response, error) {
+ if pull == nil {
+ return nil, nil, fmt.Errorf("pull must be provided")
+ }
+
u := fmt.Sprintf("repos/%v/%v/pulls/%d", owner, repo, number)
- update := new(pullRequestUpdate)
- if pull != nil {
- update.Title = pull.Title
- update.Body = pull.Body
- update.State = pull.State
- if pull.Base != nil {
- update.Base = pull.Base.Ref
- }
+ update := &pullRequestUpdate{
+ Title: pull.Title,
+ Body: pull.Body,
+ State: pull.State,
+ MaintainerCanModify: pull.MaintainerCanModify,
+ }
+ if pull.Base != nil {
+ update.Base = pull.Base.Ref
}
req, err := s.client.NewRequest("PATCH", u, update)
@@ -221,7 +230,7 @@ func (s *PullRequestsService) Edit(owner string, repo string, number int, pull *
}
p := new(PullRequest)
- resp, err := s.client.Do(req, p)
+ resp, err := s.client.Do(ctx, req, p)
if err != nil {
return nil, resp, err
}
@@ -232,7 +241,7 @@ func (s *PullRequestsService) Edit(owner string, repo string, number int, pull *
// ListCommits lists the commits in a pull request.
//
// GitHub API docs: https://developer.github.com/v3/pulls/#list-commits-on-a-pull-request
-func (s *PullRequestsService) ListCommits(owner string, repo string, number int, opt *ListOptions) ([]*RepositoryCommit, *Response, error) {
+func (s *PullRequestsService) ListCommits(ctx context.Context, owner string, repo string, number int, opt *ListOptions) ([]*RepositoryCommit, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/commits", owner, repo, number)
u, err := addOptions(u, opt)
if err != nil {
@@ -245,7 +254,7 @@ func (s *PullRequestsService) ListCommits(owner string, repo string, number int,
}
var commits []*RepositoryCommit
- resp, err := s.client.Do(req, &commits)
+ resp, err := s.client.Do(ctx, req, &commits)
if err != nil {
return nil, resp, err
}
@@ -256,7 +265,7 @@ func (s *PullRequestsService) ListCommits(owner string, repo string, number int,
// ListFiles lists the files in a pull request.
//
// GitHub API docs: https://developer.github.com/v3/pulls/#list-pull-requests-files
-func (s *PullRequestsService) ListFiles(owner string, repo string, number int, opt *ListOptions) ([]*CommitFile, *Response, error) {
+func (s *PullRequestsService) ListFiles(ctx context.Context, owner string, repo string, number int, opt *ListOptions) ([]*CommitFile, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/files", owner, repo, number)
u, err := addOptions(u, opt)
if err != nil {
@@ -269,7 +278,7 @@ func (s *PullRequestsService) ListFiles(owner string, repo string, number int, o
}
var commitFiles []*CommitFile
- resp, err := s.client.Do(req, &commitFiles)
+ resp, err := s.client.Do(ctx, req, &commitFiles)
if err != nil {
return nil, resp, err
}
@@ -280,14 +289,14 @@ func (s *PullRequestsService) ListFiles(owner string, repo string, number int, o
// IsMerged checks if a pull request has been merged.
//
// GitHub API docs: https://developer.github.com/v3/pulls/#get-if-a-pull-request-has-been-merged
-func (s *PullRequestsService) IsMerged(owner string, repo string, number int) (bool, *Response, error) {
+func (s *PullRequestsService) IsMerged(ctx context.Context, owner string, repo string, number int) (bool, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return false, nil, err
}
- resp, err := s.client.Do(req, nil)
+ resp, err := s.client.Do(ctx, req, nil)
merged, err := parseBoolResponse(err)
return merged, resp, err
}
@@ -319,7 +328,7 @@ type pullRequestMergeRequest struct {
// commitMessage is the title for the automatic commit message.
//
// GitHub API docs: https://developer.github.com/v3/pulls/#merge-a-pull-request-merge-buttontrade
-func (s *PullRequestsService) Merge(owner string, repo string, number int, commitMessage string, options *PullRequestOptions) (*PullRequestMergeResult, *Response, error) {
+func (s *PullRequestsService) Merge(ctx context.Context, owner string, repo string, number int, commitMessage string, options *PullRequestOptions) (*PullRequestMergeResult, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/merge", owner, repo, number)
pullRequestBody := &pullRequestMergeRequest{CommitMessage: commitMessage}
@@ -337,7 +346,7 @@ func (s *PullRequestsService) Merge(owner string, repo string, number int, commi
req.Header.Set("Accept", mediaTypeSquashPreview)
mergeResult := new(PullRequestMergeResult)
- resp, err := s.client.Do(req, mergeResult)
+ resp, err := s.client.Do(ctx, req, mergeResult)
if err != nil {
return nil, resp, err
}
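
Two behavioural notes fall out of the pulls.go hunks: Edit now rejects a nil pull instead of silently sending an empty update, and MaintainerCanModify joins the editable fields. A minimal sketch under those assumptions (owner, repo, and number are placeholders; imports of "context" and go-github are assumed):

// allowMaintainerEdits is an illustrative helper.
func allowMaintainerEdits(ctx context.Context, client *github.Client, owner, repo string, number int) error {
	update := &github.PullRequest{
		MaintainerCanModify: github.Bool(true), // newly editable via pullRequestUpdate
	}
	// Passing nil here would now return an error instead of issuing a PATCH.
	_, _, err := client.PullRequests.Edit(ctx, owner, repo, number, update)
	return err
}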
diff --git a/vendor/github.com/google/go-github/github/pulls_comments.go b/vendor/github.com/google/go-github/github/pulls_comments.go
index 0595389..bc0bc2d 100644
--- a/vendor/github.com/google/go-github/github/pulls_comments.go
+++ b/vendor/github.com/google/go-github/github/pulls_comments.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -54,7 +55,7 @@ type PullRequestListCommentsOptions struct {
// the repository.
//
// GitHub API docs: https://developer.github.com/v3/pulls/comments/#list-comments-on-a-pull-request
-func (s *PullRequestsService) ListComments(owner string, repo string, number int, opt *PullRequestListCommentsOptions) ([]*PullRequestComment, *Response, error) {
+func (s *PullRequestsService) ListComments(ctx context.Context, owner string, repo string, number int, opt *PullRequestListCommentsOptions) ([]*PullRequestComment, *Response, error) {
var u string
if number == 0 {
u = fmt.Sprintf("repos/%v/%v/pulls/comments", owner, repo)
@@ -75,7 +76,7 @@ func (s *PullRequestsService) ListComments(owner string, repo string, number int
req.Header.Set("Accept", mediaTypeReactionsPreview)
var comments []*PullRequestComment
- resp, err := s.client.Do(req, &comments)
+ resp, err := s.client.Do(ctx, req, &comments)
if err != nil {
return nil, resp, err
}
@@ -86,7 +87,7 @@ func (s *PullRequestsService) ListComments(owner string, repo string, number int
// GetComment fetches the specified pull request comment.
//
// GitHub API docs: https://developer.github.com/v3/pulls/comments/#get-a-single-comment
-func (s *PullRequestsService) GetComment(owner string, repo string, number int) (*PullRequestComment, *Response, error) {
+func (s *PullRequestsService) GetComment(ctx context.Context, owner string, repo string, number int) (*PullRequestComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, number)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -97,7 +98,7 @@ func (s *PullRequestsService) GetComment(owner string, repo string, number int)
req.Header.Set("Accept", mediaTypeReactionsPreview)
comment := new(PullRequestComment)
- resp, err := s.client.Do(req, comment)
+ resp, err := s.client.Do(ctx, req, comment)
if err != nil {
return nil, resp, err
}
@@ -108,7 +109,7 @@ func (s *PullRequestsService) GetComment(owner string, repo string, number int)
// CreateComment creates a new comment on the specified pull request.
//
// GitHub API docs: https://developer.github.com/v3/pulls/comments/#create-a-comment
-func (s *PullRequestsService) CreateComment(owner string, repo string, number int, comment *PullRequestComment) (*PullRequestComment, *Response, error) {
+func (s *PullRequestsService) CreateComment(ctx context.Context, owner string, repo string, number int, comment *PullRequestComment) (*PullRequestComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/comments", owner, repo, number)
req, err := s.client.NewRequest("POST", u, comment)
if err != nil {
@@ -116,7 +117,7 @@ func (s *PullRequestsService) CreateComment(owner string, repo string, number in
}
c := new(PullRequestComment)
- resp, err := s.client.Do(req, c)
+ resp, err := s.client.Do(ctx, req, c)
if err != nil {
return nil, resp, err
}
@@ -127,7 +128,7 @@ func (s *PullRequestsService) CreateComment(owner string, repo string, number in
// EditComment updates a pull request comment.
//
// GitHub API docs: https://developer.github.com/v3/pulls/comments/#edit-a-comment
-func (s *PullRequestsService) EditComment(owner string, repo string, number int, comment *PullRequestComment) (*PullRequestComment, *Response, error) {
+func (s *PullRequestsService) EditComment(ctx context.Context, owner string, repo string, number int, comment *PullRequestComment) (*PullRequestComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, number)
req, err := s.client.NewRequest("PATCH", u, comment)
if err != nil {
@@ -135,7 +136,7 @@ func (s *PullRequestsService) EditComment(owner string, repo string, number int,
}
c := new(PullRequestComment)
- resp, err := s.client.Do(req, c)
+ resp, err := s.client.Do(ctx, req, c)
if err != nil {
return nil, resp, err
}
@@ -146,11 +147,11 @@ func (s *PullRequestsService) EditComment(owner string, repo string, number int,
// DeleteComment deletes a pull request comment.
//
// GitHub API docs: https://developer.github.com/v3/pulls/comments/#delete-a-comment
-func (s *PullRequestsService) DeleteComment(owner string, repo string, number int) (*Response, error) {
+func (s *PullRequestsService) DeleteComment(ctx context.Context, owner string, repo string, number int) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/comments/%d", owner, repo, number)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
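
As before, passing 0 for number makes ListComments cover every pull request in the repository rather than a single one; only the context parameter is new. An illustrative call (owner and repo are placeholders, and nil options fetch the first page only):

// listAllReviewComments is an illustrative helper; it assumes imports of
// "context" and "github.com/google/go-github/github".
func listAllReviewComments(ctx context.Context, client *github.Client, owner, repo string) ([]*github.PullRequestComment, error) {
	comments, _, err := client.PullRequests.ListComments(ctx, owner, repo, 0, nil)
	return comments, err
}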
diff --git a/vendor/github.com/google/go-github/github/pulls_reviewers.go b/vendor/github.com/google/go-github/github/pulls_reviewers.go
new file mode 100644
index 0000000..efa3888
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/pulls_reviewers.go
@@ -0,0 +1,84 @@
+// Copyright 2017 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "context"
+ "fmt"
+)
+
+// RequestReviewers creates a review request for the provided GitHub users on the specified pull request.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/review_requests/#create-a-review-request
+func (s *PullRequestsService) RequestReviewers(ctx context.Context, owner, repo string, number int, logins []string) (*PullRequest, *Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/pulls/%d/requested_reviewers", owner, repo, number)
+
+ reviewers := struct {
+ Reviewers []string `json:"reviewers,omitempty"`
+ }{
+ Reviewers: logins,
+ }
+ req, err := s.client.NewRequest("POST", u, &reviewers)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
+
+ r := new(PullRequest)
+ resp, err := s.client.Do(ctx, req, r)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return r, resp, nil
+}
+
+// ListReviewers lists users whose reviews have been requested on the specified pull request.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/review_requests/#list-review-requests
+func (s *PullRequestsService) ListReviewers(ctx context.Context, owner, repo string, number int) ([]*User, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/pulls/%d/requested_reviewers", owner, repo, number)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
+
+ var users []*User
+ resp, err := s.client.Do(ctx, req, &users)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return users, resp, nil
+}
+
+// RemoveReviewers removes the review request for the provided GitHub users on the specified pull request.
+//
+// GitHub API docs: https://developer.github.com/v3/pulls/review_requests/#delete-a-review-request
+func (s *PullRequestsService) RemoveReviewers(ctx context.Context, owner, repo string, number int, logins []string) (*Response, error) {
+ u := fmt.Sprintf("repos/%s/%s/pulls/%d/requested_reviewers", owner, repo, number)
+
+ reviewers := struct {
+ Reviewers []string `json:"reviewers,omitempty"`
+ }{
+ Reviewers: logins,
+ }
+ req, err := s.client.NewRequest("DELETE", u, &reviewers)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
+
+ return s.client.Do(ctx, req, reviewers)
+}
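
pulls_reviewers.go is new in this update and wraps the review-request preview endpoints; the preview Accept header is set inside each method. A minimal sketch of requesting and then listing reviewers (owner, repo, number, and the logins slice are placeholders supplied by the caller; imports of "context" and go-github are assumed):

// requestAndListReviewers is an illustrative helper.
func requestAndListReviewers(ctx context.Context, client *github.Client, owner, repo string, number int, logins []string) ([]*github.User, error) {
	if _, _, err := client.PullRequests.RequestReviewers(ctx, owner, repo, number, logins); err != nil {
		return nil, err
	}
	users, _, err := client.PullRequests.ListReviewers(ctx, owner, repo, number)
	return users, err
}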
diff --git a/vendor/github.com/google/go-github/github/pulls_reviews.go b/vendor/github.com/google/go-github/github/pulls_reviews.go
index be57af8..c27b6a8 100644
--- a/vendor/github.com/google/go-github/github/pulls_reviews.go
+++ b/vendor/github.com/google/go-github/github/pulls_reviews.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -64,7 +65,7 @@ func (r PullRequestReviewDismissalRequest) String() string {
// Read more about it here - https://github.com/google/go-github/issues/540
//
// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#list-reviews-on-a-pull-request
-func (s *PullRequestsService) ListReviews(owner, repo string, number int) ([]*PullRequestReview, *Response, error) {
+func (s *PullRequestsService) ListReviews(ctx context.Context, owner, repo string, number int) ([]*PullRequestReview, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews", owner, repo, number)
req, err := s.client.NewRequest("GET", u, nil)
@@ -76,7 +77,7 @@ func (s *PullRequestsService) ListReviews(owner, repo string, number int) ([]*Pu
req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
var reviews []*PullRequestReview
- resp, err := s.client.Do(req, &reviews)
+ resp, err := s.client.Do(ctx, req, &reviews)
if err != nil {
return nil, resp, err
}
@@ -91,7 +92,7 @@ func (s *PullRequestsService) ListReviews(owner, repo string, number int) ([]*Pu
// Read more about it here - https://github.com/google/go-github/issues/540
//
// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#get-a-single-review
-func (s *PullRequestsService) GetReview(owner, repo string, number, reviewID int) (*PullRequestReview, *Response, error) {
+func (s *PullRequestsService) GetReview(ctx context.Context, owner, repo string, number, reviewID int) (*PullRequestReview, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d", owner, repo, number, reviewID)
req, err := s.client.NewRequest("GET", u, nil)
@@ -103,7 +104,7 @@ func (s *PullRequestsService) GetReview(owner, repo string, number, reviewID int
req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
review := new(PullRequestReview)
- resp, err := s.client.Do(req, review)
+ resp, err := s.client.Do(ctx, req, review)
if err != nil {
return nil, resp, err
}
@@ -118,7 +119,7 @@ func (s *PullRequestsService) GetReview(owner, repo string, number, reviewID int
// Read more about it here - https://github.com/google/go-github/issues/540
//
// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#delete-a-pending-review
-func (s *PullRequestsService) DeletePendingReview(owner, repo string, number, reviewID int) (*PullRequestReview, *Response, error) {
+func (s *PullRequestsService) DeletePendingReview(ctx context.Context, owner, repo string, number, reviewID int) (*PullRequestReview, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d", owner, repo, number, reviewID)
req, err := s.client.NewRequest("DELETE", u, nil)
@@ -130,7 +131,7 @@ func (s *PullRequestsService) DeletePendingReview(owner, repo string, number, re
req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
review := new(PullRequestReview)
- resp, err := s.client.Do(req, review)
+ resp, err := s.client.Do(ctx, req, review)
if err != nil {
return nil, resp, err
}
@@ -145,7 +146,7 @@ func (s *PullRequestsService) DeletePendingReview(owner, repo string, number, re
// Read more about it here - https://github.com/google/go-github/issues/540
//
// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#get-a-single-reviews-comments
-func (s *PullRequestsService) ListReviewComments(owner, repo string, number, reviewID int) ([]*PullRequestComment, *Response, error) {
+func (s *PullRequestsService) ListReviewComments(ctx context.Context, owner, repo string, number, reviewID int) ([]*PullRequestComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d/comments", owner, repo, number, reviewID)
req, err := s.client.NewRequest("GET", u, nil)
@@ -157,7 +158,7 @@ func (s *PullRequestsService) ListReviewComments(owner, repo string, number, rev
req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
var comments []*PullRequestComment
- resp, err := s.client.Do(req, &comments)
+ resp, err := s.client.Do(ctx, req, &comments)
if err != nil {
return nil, resp, err
}
@@ -172,7 +173,7 @@ func (s *PullRequestsService) ListReviewComments(owner, repo string, number, rev
// Read more about it here - https://github.com/google/go-github/issues/540
//
// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#create-a-pull-request-review
-func (s *PullRequestsService) CreateReview(owner, repo string, number int, review *PullRequestReviewRequest) (*PullRequestReview, *Response, error) {
+func (s *PullRequestsService) CreateReview(ctx context.Context, owner, repo string, number int, review *PullRequestReviewRequest) (*PullRequestReview, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews", owner, repo, number)
req, err := s.client.NewRequest("POST", u, review)
@@ -184,7 +185,7 @@ func (s *PullRequestsService) CreateReview(owner, repo string, number int, revie
req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
r := new(PullRequestReview)
- resp, err := s.client.Do(req, r)
+ resp, err := s.client.Do(ctx, req, r)
if err != nil {
return nil, resp, err
}
@@ -199,7 +200,7 @@ func (s *PullRequestsService) CreateReview(owner, repo string, number int, revie
// Read more about it here - https://github.com/google/go-github/issues/540
//
// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#submit-a-pull-request-review
-func (s *PullRequestsService) SubmitReview(owner, repo string, number, reviewID int, review *PullRequestReviewRequest) (*PullRequestReview, *Response, error) {
+func (s *PullRequestsService) SubmitReview(ctx context.Context, owner, repo string, number, reviewID int, review *PullRequestReviewRequest) (*PullRequestReview, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d/events", owner, repo, number, reviewID)
req, err := s.client.NewRequest("POST", u, review)
@@ -211,7 +212,7 @@ func (s *PullRequestsService) SubmitReview(owner, repo string, number, reviewID
req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
r := new(PullRequestReview)
- resp, err := s.client.Do(req, r)
+ resp, err := s.client.Do(ctx, req, r)
if err != nil {
return nil, resp, err
}
@@ -226,7 +227,7 @@ func (s *PullRequestsService) SubmitReview(owner, repo string, number, reviewID
// Read more about it here - https://github.com/google/go-github/issues/540
//
// GitHub API docs: https://developer.github.com/v3/pulls/reviews/#dismiss-a-pull-request-review
-func (s *PullRequestsService) DismissReview(owner, repo string, number, reviewID int, review *PullRequestReviewDismissalRequest) (*PullRequestReview, *Response, error) {
+func (s *PullRequestsService) DismissReview(ctx context.Context, owner, repo string, number, reviewID int, review *PullRequestReviewDismissalRequest) (*PullRequestReview, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/%d/reviews/%d/dismissals", owner, repo, number, reviewID)
req, err := s.client.NewRequest("PUT", u, review)
@@ -238,7 +239,7 @@ func (s *PullRequestsService) DismissReview(owner, repo string, number, reviewID
req.Header.Set("Accept", mediaTypePullRequestReviewsPreview)
r := new(PullRequestReview)
- resp, err := s.client.Do(req, r)
+ resp, err := s.client.Do(ctx, req, r)
if err != nil {
return nil, resp, err
}
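
The review endpoints keep the preview caveat noted in their doc comments and now also take a context. A small illustrative helper that lists reviews and their comments (owner, repo, and number are placeholders; imports of "context", "fmt", and go-github are assumed):

// reviewSummaries is an illustrative helper.
func reviewSummaries(ctx context.Context, client *github.Client, owner, repo string, number int) error {
	reviews, _, err := client.PullRequests.ListReviews(ctx, owner, repo, number)
	if err != nil {
		return err
	}
	for _, r := range reviews {
		if r.ID == nil {
			continue
		}
		// Each review's comments can be fetched individually.
		comments, _, err := client.PullRequests.ListReviewComments(ctx, owner, repo, number, *r.ID)
		if err != nil {
			return err
		}
		fmt.Printf("review %d has %d comments\n", *r.ID, len(comments))
	}
	return nil
}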
diff --git a/vendor/github.com/google/go-github/github/reactions.go b/vendor/github.com/google/go-github/github/reactions.go
index 03b131b..739413d 100644
--- a/vendor/github.com/google/go-github/github/reactions.go
+++ b/vendor/github.com/google/go-github/github/reactions.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// ReactionsService provides access to the reactions-related functions in the
// GitHub API.
@@ -43,7 +46,7 @@ func (r Reaction) String() string {
// ListCommentReactions lists the reactions for a commit comment.
//
// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-a-commit-comment
-func (s *ReactionsService) ListCommentReactions(owner, repo string, id int, opt *ListOptions) ([]*Reaction, *Response, error) {
+func (s *ReactionsService) ListCommentReactions(ctx context.Context, owner, repo string, id int, opt *ListOptions) ([]*Reaction, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/comments/%v/reactions", owner, repo, id)
u, err := addOptions(u, opt)
if err != nil {
@@ -59,7 +62,7 @@ func (s *ReactionsService) ListCommentReactions(owner, repo string, id int, opt
req.Header.Set("Accept", mediaTypeReactionsPreview)
var m []*Reaction
- resp, err := s.client.Do(req, &m)
+ resp, err := s.client.Do(ctx, req, &m)
if err != nil {
return nil, resp, err
}
@@ -72,7 +75,7 @@ func (s *ReactionsService) ListCommentReactions(owner, repo string, id int, opt
// previously created reaction will be returned with Status: 200 OK.
//
// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-a-commit-comment
-func (s ReactionsService) CreateCommentReaction(owner, repo string, id int, content string) (*Reaction, *Response, error) {
+func (s ReactionsService) CreateCommentReaction(ctx context.Context, owner, repo string, id int, content string) (*Reaction, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/comments/%v/reactions", owner, repo, id)
body := &Reaction{Content: String(content)}
@@ -85,7 +88,7 @@ func (s ReactionsService) CreateCommentReaction(owner, repo string, id int, cont
req.Header.Set("Accept", mediaTypeReactionsPreview)
m := &Reaction{}
- resp, err := s.client.Do(req, m)
+ resp, err := s.client.Do(ctx, req, m)
if err != nil {
return nil, resp, err
}
@@ -96,7 +99,7 @@ func (s ReactionsService) CreateCommentReaction(owner, repo string, id int, cont
// ListIssueReactions lists the reactions for an issue.
//
// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-an-issue
-func (s *ReactionsService) ListIssueReactions(owner, repo string, number int, opt *ListOptions) ([]*Reaction, *Response, error) {
+func (s *ReactionsService) ListIssueReactions(ctx context.Context, owner, repo string, number int, opt *ListOptions) ([]*Reaction, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%v/reactions", owner, repo, number)
u, err := addOptions(u, opt)
if err != nil {
@@ -112,7 +115,7 @@ func (s *ReactionsService) ListIssueReactions(owner, repo string, number int, op
req.Header.Set("Accept", mediaTypeReactionsPreview)
var m []*Reaction
- resp, err := s.client.Do(req, &m)
+ resp, err := s.client.Do(ctx, req, &m)
if err != nil {
return nil, resp, err
}
@@ -125,7 +128,7 @@ func (s *ReactionsService) ListIssueReactions(owner, repo string, number int, op
// previously created reaction will be returned with Status: 200 OK.
//
// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-an-issue
-func (s ReactionsService) CreateIssueReaction(owner, repo string, number int, content string) (*Reaction, *Response, error) {
+func (s ReactionsService) CreateIssueReaction(ctx context.Context, owner, repo string, number int, content string) (*Reaction, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/%v/reactions", owner, repo, number)
body := &Reaction{Content: String(content)}
@@ -138,7 +141,7 @@ func (s ReactionsService) CreateIssueReaction(owner, repo string, number int, co
req.Header.Set("Accept", mediaTypeReactionsPreview)
m := &Reaction{}
- resp, err := s.client.Do(req, m)
+ resp, err := s.client.Do(ctx, req, m)
if err != nil {
return nil, resp, err
}
@@ -149,7 +152,7 @@ func (s ReactionsService) CreateIssueReaction(owner, repo string, number int, co
// ListIssueCommentReactions lists the reactions for an issue comment.
//
// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-an-issue-comment
-func (s *ReactionsService) ListIssueCommentReactions(owner, repo string, id int, opt *ListOptions) ([]*Reaction, *Response, error) {
+func (s *ReactionsService) ListIssueCommentReactions(ctx context.Context, owner, repo string, id int, opt *ListOptions) ([]*Reaction, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/comments/%v/reactions", owner, repo, id)
u, err := addOptions(u, opt)
if err != nil {
@@ -165,7 +168,7 @@ func (s *ReactionsService) ListIssueCommentReactions(owner, repo string, id int,
req.Header.Set("Accept", mediaTypeReactionsPreview)
var m []*Reaction
- resp, err := s.client.Do(req, &m)
+ resp, err := s.client.Do(ctx, req, &m)
if err != nil {
return nil, resp, err
}
@@ -178,7 +181,7 @@ func (s *ReactionsService) ListIssueCommentReactions(owner, repo string, id int,
// previously created reaction will be returned with Status: 200 OK.
//
// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-an-issue-comment
-func (s ReactionsService) CreateIssueCommentReaction(owner, repo string, id int, content string) (*Reaction, *Response, error) {
+func (s ReactionsService) CreateIssueCommentReaction(ctx context.Context, owner, repo string, id int, content string) (*Reaction, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/issues/comments/%v/reactions", owner, repo, id)
body := &Reaction{Content: String(content)}
@@ -191,7 +194,7 @@ func (s ReactionsService) CreateIssueCommentReaction(owner, repo string, id int,
req.Header.Set("Accept", mediaTypeReactionsPreview)
m := &Reaction{}
- resp, err := s.client.Do(req, m)
+ resp, err := s.client.Do(ctx, req, m)
if err != nil {
return nil, resp, err
}
@@ -202,7 +205,7 @@ func (s ReactionsService) CreateIssueCommentReaction(owner, repo string, id int,
// ListPullRequestCommentReactions lists the reactions for a pull request review comment.
//
// GitHub API docs: https://developer.github.com/v3/reactions/#list-reactions-for-an-issue-comment
-func (s *ReactionsService) ListPullRequestCommentReactions(owner, repo string, id int, opt *ListOptions) ([]*Reaction, *Response, error) {
+func (s *ReactionsService) ListPullRequestCommentReactions(ctx context.Context, owner, repo string, id int, opt *ListOptions) ([]*Reaction, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/comments/%v/reactions", owner, repo, id)
u, err := addOptions(u, opt)
if err != nil {
@@ -218,7 +221,7 @@ func (s *ReactionsService) ListPullRequestCommentReactions(owner, repo string, i
req.Header.Set("Accept", mediaTypeReactionsPreview)
var m []*Reaction
- resp, err := s.client.Do(req, &m)
+ resp, err := s.client.Do(ctx, req, &m)
if err != nil {
return nil, resp, err
}
@@ -231,7 +234,7 @@ func (s *ReactionsService) ListPullRequestCommentReactions(owner, repo string, i
// previously created reaction will be returned with Status: 200 OK.
//
// GitHub API docs: https://developer.github.com/v3/reactions/#create-reaction-for-an-issue-comment
-func (s ReactionsService) CreatePullRequestCommentReaction(owner, repo string, id int, content string) (*Reaction, *Response, error) {
+func (s ReactionsService) CreatePullRequestCommentReaction(ctx context.Context, owner, repo string, id int, content string) (*Reaction, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pulls/comments/%v/reactions", owner, repo, id)
body := &Reaction{Content: String(content)}
@@ -244,7 +247,7 @@ func (s ReactionsService) CreatePullRequestCommentReaction(owner, repo string, i
req.Header.Set("Accept", mediaTypeReactionsPreview)
m := &Reaction{}
- resp, err := s.client.Do(req, m)
+ resp, err := s.client.Do(ctx, req, m)
if err != nil {
return nil, resp, err
}
@@ -255,7 +258,7 @@ func (s ReactionsService) CreatePullRequestCommentReaction(owner, repo string, i
// DeleteReaction deletes a reaction.
//
// GitHub API docs: https://developer.github.com/v3/reaction/reactions/#delete-a-reaction-archive
-func (s *ReactionsService) DeleteReaction(id int) (*Response, error) {
+func (s *ReactionsService) DeleteReaction(ctx context.Context, id int) (*Response, error) {
u := fmt.Sprintf("reactions/%v", id)
req, err := s.client.NewRequest("DELETE", u, nil)
@@ -266,5 +269,5 @@ func (s *ReactionsService) DeleteReaction(id int) (*Response, error) {
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeReactionsPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
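
The hunks above show the pattern this go-github update applies to every service method: a context.Context is threaded through as the first argument and handed to client.Do, so callers can attach deadlines to or cancel in-flight GitHub API calls. A minimal sketch of calling the updated ReactionsService from application code follows; the owner, repository, comment ID, and timeout are illustrative placeholders, and write operations would additionally need an authenticated client.

    package main

    import (
        "context"
        "fmt"
        "log"
        "time"

        "github.com/google/go-github/github"
    )

    func main() {
        client := github.NewClient(nil) // unauthenticated; enough for reading public data

        // The deadline covers the whole round trip performed by client.Do.
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()

        // Placeholder owner/repo/comment ID, purely for illustration.
        reactions, resp, err := client.Reactions.ListCommentReactions(ctx, "octocat", "Hello-World", 1, nil)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("%s: %d reactions\n", resp.Status, len(reactions))
    }
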
diff --git a/vendor/github.com/google/go-github/github/repos.go b/vendor/github.com/google/go-github/github/repos.go
index 58d27f1..058f149 100644
--- a/vendor/github.com/google/go-github/github/repos.go
+++ b/vendor/github.com/google/go-github/github/repos.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"strings"
)
@@ -155,7 +156,7 @@ type RepositoryListOptions struct {
// repositories for the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/repos/#list-user-repositories
-func (s *RepositoriesService) List(user string, opt *RepositoryListOptions) ([]*Repository, *Response, error) {
+func (s *RepositoriesService) List(ctx context.Context, user string, opt *RepositoryListOptions) ([]*Repository, *Response, error) {
var u string
if user != "" {
u = fmt.Sprintf("users/%v/repos", user)
@@ -176,7 +177,7 @@ func (s *RepositoriesService) List(user string, opt *RepositoryListOptions) ([]*
req.Header.Set("Accept", mediaTypeLicensesPreview)
var repos []*Repository
- resp, err := s.client.Do(req, &repos)
+ resp, err := s.client.Do(ctx, req, &repos)
if err != nil {
return nil, resp, err
}
@@ -197,7 +198,7 @@ type RepositoryListByOrgOptions struct {
// ListByOrg lists the repositories for an organization.
//
// GitHub API docs: https://developer.github.com/v3/repos/#list-organization-repositories
-func (s *RepositoriesService) ListByOrg(org string, opt *RepositoryListByOrgOptions) ([]*Repository, *Response, error) {
+func (s *RepositoriesService) ListByOrg(ctx context.Context, org string, opt *RepositoryListByOrgOptions) ([]*Repository, *Response, error) {
u := fmt.Sprintf("orgs/%v/repos", org)
u, err := addOptions(u, opt)
if err != nil {
@@ -213,7 +214,7 @@ func (s *RepositoriesService) ListByOrg(org string, opt *RepositoryListByOrgOpti
req.Header.Set("Accept", mediaTypeLicensesPreview)
var repos []*Repository
- resp, err := s.client.Do(req, &repos)
+ resp, err := s.client.Do(ctx, req, &repos)
if err != nil {
return nil, resp, err
}
@@ -233,7 +234,7 @@ type RepositoryListAllOptions struct {
// ListAll lists all GitHub repositories in the order that they were created.
//
// GitHub API docs: https://developer.github.com/v3/repos/#list-all-public-repositories
-func (s *RepositoriesService) ListAll(opt *RepositoryListAllOptions) ([]*Repository, *Response, error) {
+func (s *RepositoriesService) ListAll(ctx context.Context, opt *RepositoryListAllOptions) ([]*Repository, *Response, error) {
u, err := addOptions("repositories", opt)
if err != nil {
return nil, nil, err
@@ -245,7 +246,7 @@ func (s *RepositoriesService) ListAll(opt *RepositoryListAllOptions) ([]*Reposit
}
var repos []*Repository
- resp, err := s.client.Do(req, &repos)
+ resp, err := s.client.Do(ctx, req, &repos)
if err != nil {
return nil, resp, err
}
@@ -258,7 +259,7 @@ func (s *RepositoriesService) ListAll(opt *RepositoryListAllOptions) ([]*Reposit
// specified, it will be created for the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/repos/#create
-func (s *RepositoriesService) Create(org string, repo *Repository) (*Repository, *Response, error) {
+func (s *RepositoriesService) Create(ctx context.Context, org string, repo *Repository) (*Repository, *Response, error) {
var u string
if org != "" {
u = fmt.Sprintf("orgs/%v/repos", org)
@@ -272,7 +273,7 @@ func (s *RepositoriesService) Create(org string, repo *Repository) (*Repository,
}
r := new(Repository)
- resp, err := s.client.Do(req, r)
+ resp, err := s.client.Do(ctx, req, r)
if err != nil {
return nil, resp, err
}
@@ -283,7 +284,7 @@ func (s *RepositoriesService) Create(org string, repo *Repository) (*Repository,
// Get fetches a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/#get
-func (s *RepositoriesService) Get(owner, repo string) (*Repository, *Response, error) {
+func (s *RepositoriesService) Get(ctx context.Context, owner, repo string) (*Repository, *Response, error) {
u := fmt.Sprintf("repos/%v/%v", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -296,7 +297,7 @@ func (s *RepositoriesService) Get(owner, repo string) (*Repository, *Response, e
req.Header.Set("Accept", strings.Join(acceptHeaders, ", "))
repository := new(Repository)
- resp, err := s.client.Do(req, repository)
+ resp, err := s.client.Do(ctx, req, repository)
if err != nil {
return nil, resp, err
}
@@ -307,7 +308,7 @@ func (s *RepositoriesService) Get(owner, repo string) (*Repository, *Response, e
// GetByID fetches a repository.
//
// Note: GetByID uses the undocumented GitHub API endpoint /repositories/:id.
-func (s *RepositoriesService) GetByID(id int) (*Repository, *Response, error) {
+func (s *RepositoriesService) GetByID(ctx context.Context, id int) (*Repository, *Response, error) {
u := fmt.Sprintf("repositories/%d", id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -319,7 +320,7 @@ func (s *RepositoriesService) GetByID(id int) (*Repository, *Response, error) {
req.Header.Set("Accept", mediaTypeLicensesPreview)
repository := new(Repository)
- resp, err := s.client.Do(req, repository)
+ resp, err := s.client.Do(ctx, req, repository)
if err != nil {
return nil, resp, err
}
@@ -330,7 +331,7 @@ func (s *RepositoriesService) GetByID(id int) (*Repository, *Response, error) {
// Edit updates a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/#edit
-func (s *RepositoriesService) Edit(owner, repo string, repository *Repository) (*Repository, *Response, error) {
+func (s *RepositoriesService) Edit(ctx context.Context, owner, repo string, repository *Repository) (*Repository, *Response, error) {
u := fmt.Sprintf("repos/%v/%v", owner, repo)
req, err := s.client.NewRequest("PATCH", u, repository)
if err != nil {
@@ -338,10 +339,10 @@ func (s *RepositoriesService) Edit(owner, repo string, repository *Repository) (
}
// TODO: Remove this preview header after API is fully vetted.
- req.Header.Add("Accept", mediaTypeSquashPreview)
+ req.Header.Set("Accept", mediaTypeSquashPreview)
r := new(Repository)
- resp, err := s.client.Do(req, r)
+ resp, err := s.client.Do(ctx, req, r)
if err != nil {
return nil, resp, err
}
@@ -352,14 +353,14 @@ func (s *RepositoriesService) Edit(owner, repo string, repository *Repository) (
// Delete a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/#delete-a-repository
-func (s *RepositoriesService) Delete(owner, repo string) (*Response, error) {
+func (s *RepositoriesService) Delete(ctx context.Context, owner, repo string) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v", owner, repo)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// Contributor represents a repository contributor
@@ -396,7 +397,7 @@ type ListContributorsOptions struct {
// ListContributors lists contributors for a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/#list-contributors
-func (s *RepositoriesService) ListContributors(owner string, repository string, opt *ListContributorsOptions) ([]*Contributor, *Response, error) {
+func (s *RepositoriesService) ListContributors(ctx context.Context, owner string, repository string, opt *ListContributorsOptions) ([]*Contributor, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/contributors", owner, repository)
u, err := addOptions(u, opt)
if err != nil {
@@ -409,7 +410,7 @@ func (s *RepositoriesService) ListContributors(owner string, repository string,
}
var contributor []*Contributor
- resp, err := s.client.Do(req, &contributor)
+ resp, err := s.client.Do(ctx, req, &contributor)
if err != nil {
return nil, nil, err
}
@@ -426,8 +427,8 @@ func (s *RepositoriesService) ListContributors(owner string, repository string,
// "Python": 7769
// }
//
-// GitHub API Docs: https://developer.github.com/v3/repos/#list-languages
-func (s *RepositoriesService) ListLanguages(owner string, repo string) (map[string]int, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/#list-languages
+func (s *RepositoriesService) ListLanguages(ctx context.Context, owner string, repo string) (map[string]int, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/languages", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -435,7 +436,7 @@ func (s *RepositoriesService) ListLanguages(owner string, repo string) (map[stri
}
languages := make(map[string]int)
- resp, err := s.client.Do(req, &languages)
+ resp, err := s.client.Do(ctx, req, &languages)
if err != nil {
return nil, resp, err
}
@@ -446,7 +447,7 @@ func (s *RepositoriesService) ListLanguages(owner string, repo string) (map[stri
// ListTeams lists the teams for the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/#list-teams
-func (s *RepositoriesService) ListTeams(owner string, repo string, opt *ListOptions) ([]*Team, *Response, error) {
+func (s *RepositoriesService) ListTeams(ctx context.Context, owner string, repo string, opt *ListOptions) ([]*Team, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/teams", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -459,7 +460,7 @@ func (s *RepositoriesService) ListTeams(owner string, repo string, opt *ListOpti
}
var teams []*Team
- resp, err := s.client.Do(req, &teams)
+ resp, err := s.client.Do(ctx, req, &teams)
if err != nil {
return nil, resp, err
}
@@ -478,7 +479,7 @@ type RepositoryTag struct {
// ListTags lists tags for the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/#list-tags
-func (s *RepositoriesService) ListTags(owner string, repo string, opt *ListOptions) ([]*RepositoryTag, *Response, error) {
+func (s *RepositoriesService) ListTags(ctx context.Context, owner string, repo string, opt *ListOptions) ([]*RepositoryTag, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/tags", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -491,7 +492,7 @@ func (s *RepositoriesService) ListTags(owner string, repo string, opt *ListOptio
}
var tags []*RepositoryTag
- resp, err := s.client.Do(req, &tags)
+ resp, err := s.client.Do(ctx, req, &tags)
if err != nil {
return nil, resp, err
}
@@ -560,7 +561,7 @@ type BranchRestrictionsRequest struct {
// ListBranches lists branches for the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/#list-branches
-func (s *RepositoriesService) ListBranches(owner string, repo string, opt *ListOptions) ([]*Branch, *Response, error) {
+func (s *RepositoriesService) ListBranches(ctx context.Context, owner string, repo string, opt *ListOptions) ([]*Branch, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/branches", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -576,7 +577,7 @@ func (s *RepositoriesService) ListBranches(owner string, repo string, opt *ListO
req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
var branches []*Branch
- resp, err := s.client.Do(req, &branches)
+ resp, err := s.client.Do(ctx, req, &branches)
if err != nil {
return nil, resp, err
}
@@ -587,7 +588,7 @@ func (s *RepositoriesService) ListBranches(owner string, repo string, opt *ListO
// GetBranch gets the specified branch for a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/#get-branch
-func (s *RepositoriesService) GetBranch(owner, repo, branch string) (*Branch, *Response, error) {
+func (s *RepositoriesService) GetBranch(ctx context.Context, owner, repo, branch string) (*Branch, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/branches/%v", owner, repo, branch)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -598,7 +599,7 @@ func (s *RepositoriesService) GetBranch(owner, repo, branch string) (*Branch, *R
req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
b := new(Branch)
- resp, err := s.client.Do(req, b)
+ resp, err := s.client.Do(ctx, req, b)
if err != nil {
return nil, resp, err
}
@@ -609,7 +610,7 @@ func (s *RepositoriesService) GetBranch(owner, repo, branch string) (*Branch, *R
// GetBranchProtection gets the protection of a given branch.
//
// GitHub API docs: https://developer.github.com/v3/repos/branches/#get-branch-protection
-func (s *RepositoriesService) GetBranchProtection(owner, repo, branch string) (*Protection, *Response, error) {
+func (s *RepositoriesService) GetBranchProtection(ctx context.Context, owner, repo, branch string) (*Protection, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -620,7 +621,7 @@ func (s *RepositoriesService) GetBranchProtection(owner, repo, branch string) (*
req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
p := new(Protection)
- resp, err := s.client.Do(req, p)
+ resp, err := s.client.Do(ctx, req, p)
if err != nil {
return nil, resp, err
}
@@ -628,10 +629,53 @@ func (s *RepositoriesService) GetBranchProtection(owner, repo, branch string) (*
return p, resp, nil
}
+// GetRequiredStatusChecks gets the required status checks for a given protected branch.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#get-required-status-checks-of-protected-branch
+func (s *RepositoriesService) GetRequiredStatusChecks(ctx context.Context, owner, repo, branch string) (*RequiredStatusChecks, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_status_checks", owner, repo, branch)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ p := new(RequiredStatusChecks)
+ resp, err := s.client.Do(ctx, req, p)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return p, resp, nil
+}
+
+// ListRequiredStatusChecksContexts lists the required status checks contexts for a given protected branch.
+//
+// GitHub API docs: https://developer.github.com/v3/repos/branches/#list-required-status-checks-contexts-of-protected-branch
+func (s *RepositoriesService) ListRequiredStatusChecksContexts(ctx context.Context, owner, repo, branch string) (contexts []string, resp *Response, err error) {
+ u := fmt.Sprintf("repos/%v/%v/branches/%v/protection/required_status_checks/contexts", owner, repo, branch)
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches
+ req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
+
+ resp, err = s.client.Do(ctx, req, &contexts)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return contexts, resp, nil
+}
+
// UpdateBranchProtection updates the protection of a given branch.
//
// GitHub API docs: https://developer.github.com/v3/repos/branches/#update-branch-protection
-func (s *RepositoriesService) UpdateBranchProtection(owner, repo, branch string, preq *ProtectionRequest) (*Protection, *Response, error) {
+func (s *RepositoriesService) UpdateBranchProtection(ctx context.Context, owner, repo, branch string, preq *ProtectionRequest) (*Protection, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch)
req, err := s.client.NewRequest("PUT", u, preq)
if err != nil {
@@ -642,7 +686,7 @@ func (s *RepositoriesService) UpdateBranchProtection(owner, repo, branch string,
req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
p := new(Protection)
- resp, err := s.client.Do(req, p)
+ resp, err := s.client.Do(ctx, req, p)
if err != nil {
return nil, resp, err
}
@@ -653,7 +697,7 @@ func (s *RepositoriesService) UpdateBranchProtection(owner, repo, branch string,
// RemoveBranchProtection removes the protection of a given branch.
//
// GitHub API docs: https://developer.github.com/v3/repos/branches/#remove-branch-protection
-func (s *RepositoriesService) RemoveBranchProtection(owner, repo, branch string) (*Response, error) {
+func (s *RepositoriesService) RemoveBranchProtection(ctx context.Context, owner, repo, branch string) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/branches/%v/protection", owner, repo, branch)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
@@ -663,13 +707,13 @@ func (s *RepositoriesService) RemoveBranchProtection(owner, repo, branch string)
// TODO: remove custom Accept header when this API fully launches
req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// License gets the contents of a repository's license if one is detected.
//
// GitHub API docs: https://developer.github.com/v3/licenses/#get-the-contents-of-a-repositorys-license
-func (s *RepositoriesService) License(owner, repo string) (*RepositoryLicense, *Response, error) {
+func (s *RepositoriesService) License(ctx context.Context, owner, repo string) (*RepositoryLicense, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/license", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -677,7 +721,7 @@ func (s *RepositoriesService) License(owner, repo string) (*RepositoryLicense, *
}
r := &RepositoryLicense{}
- resp, err := s.client.Do(req, r)
+ resp, err := s.client.Do(ctx, req, r)
if err != nil {
return nil, resp, err
}
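
Besides the mechanical context plumbing, repos.go also gains two protected-branch endpoints in this update: GetRequiredStatusChecks and ListRequiredStatusChecksContexts. Below is a hedged sketch of combining them; the repository and branch names are placeholders, and reading protection settings normally requires a client authenticated with sufficient scope.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/google/go-github/github"
    )

    func main() {
        client := github.NewClient(nil) // substitute an authenticated client for real use
        ctx := context.Background()

        // The settings object describes strictness and required contexts;
        // here we only confirm the call succeeds.
        if _, _, err := client.Repositories.GetRequiredStatusChecks(ctx, "octocat", "Hello-World", "master"); err != nil {
            log.Fatal(err)
        }

        // The contexts endpoint returns just the required status check names.
        contexts, _, err := client.Repositories.ListRequiredStatusChecksContexts(ctx, "octocat", "Hello-World", "master")
        if err != nil {
            log.Fatal(err)
        }
        for _, c := range contexts {
            fmt.Println("required status check:", c)
        }
    }
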
diff --git a/vendor/github.com/google/go-github/github/repos_collaborators.go b/vendor/github.com/google/go-github/github/repos_collaborators.go
index ddb88f5..76e8a1f 100644
--- a/vendor/github.com/google/go-github/github/repos_collaborators.go
+++ b/vendor/github.com/google/go-github/github/repos_collaborators.go
@@ -5,12 +5,15 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
-// ListCollaborators lists the Github users that have access to the repository.
+// ListCollaborators lists the GitHub users that have access to the repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#list
-func (s *RepositoriesService) ListCollaborators(owner, repo string, opt *ListOptions) ([]*User, *Response, error) {
+func (s *RepositoriesService) ListCollaborators(ctx context.Context, owner, repo string, opt *ListOptions) ([]*User, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/collaborators", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -23,7 +26,7 @@ func (s *RepositoriesService) ListCollaborators(owner, repo string, opt *ListOpt
}
var users []*User
- resp, err := s.client.Do(req, &users)
+ resp, err := s.client.Do(ctx, req, &users)
if err != nil {
return nil, resp, err
}
@@ -31,20 +34,20 @@ func (s *RepositoriesService) ListCollaborators(owner, repo string, opt *ListOpt
return users, resp, nil
}
-// IsCollaborator checks whether the specified Github user has collaborator
+// IsCollaborator checks whether the specified GitHub user has collaborator
// access to the given repo.
// Note: This will return false if the user is not a collaborator OR the user
// is not a GitHub user.
//
// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#get
-func (s *RepositoriesService) IsCollaborator(owner, repo, user string) (bool, *Response, error) {
+func (s *RepositoriesService) IsCollaborator(ctx context.Context, owner, repo, user string) (bool, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return false, nil, err
}
- resp, err := s.client.Do(req, nil)
+ resp, err := s.client.Do(ctx, req, nil)
isCollab, err := parseBoolResponse(err)
return isCollab, resp, err
}
@@ -60,18 +63,15 @@ type RepositoryPermissionLevel struct {
// GetPermissionLevel retrieves the specific permission level a collaborator has for a given repository.
// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#review-a-users-permission-level
-func (s *RepositoriesService) GetPermissionLevel(owner, repo, user string) (*RepositoryPermissionLevel, *Response, error) {
+func (s *RepositoriesService) GetPermissionLevel(ctx context.Context, owner, repo, user string) (*RepositoryPermissionLevel, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/collaborators/%v/permission", owner, repo, user)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
- // TODO: remove custom Accept header when this API fully launches.
- req.Header.Set("Accept", mediaTypeOrgMembershipPreview)
-
rpl := new(RepositoryPermissionLevel)
- resp, err := s.client.Do(req, rpl)
+ resp, err := s.client.Do(ctx, req, rpl)
if err != nil {
return nil, resp, err
}
@@ -91,10 +91,10 @@ type RepositoryAddCollaboratorOptions struct {
Permission string `json:"permission,omitempty"`
}
-// AddCollaborator adds the specified Github user as collaborator to the given repo.
+// AddCollaborator adds the specified GitHub user as collaborator to the given repo.
//
// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#add-user-as-a-collaborator
-func (s *RepositoriesService) AddCollaborator(owner, repo, user string, opt *RepositoryAddCollaboratorOptions) (*Response, error) {
+func (s *RepositoriesService) AddCollaborator(ctx context.Context, owner, repo, user string, opt *RepositoryAddCollaboratorOptions) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user)
req, err := s.client.NewRequest("PUT", u, opt)
if err != nil {
@@ -104,18 +104,18 @@ func (s *RepositoriesService) AddCollaborator(owner, repo, user string, opt *Rep
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
-// RemoveCollaborator removes the specified Github user as collaborator from the given repo.
+// RemoveCollaborator removes the specified GitHub user as collaborator from the given repo.
// Note: Does not return error if a valid user that is not a collaborator is removed.
//
// GitHub API docs: https://developer.github.com/v3/repos/collaborators/#remove-collaborator
-func (s *RepositoriesService) RemoveCollaborator(owner, repo, user string) (*Response, error) {
+func (s *RepositoriesService) RemoveCollaborator(ctx context.Context, owner, repo, user string) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/collaborators/%v", owner, repo, user)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
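
IsCollaborator is one of the methods that maps an HTTP status onto a bool: client.Do is called with a nil body and parseBoolResponse turns a 204 into true and a 404 into false. A small sketch of the context-aware call is below; the user and repository are placeholders, and collaborator checks typically require an authenticated client with access to the repository.

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/google/go-github/github"
    )

    func main() {
        client := github.NewClient(nil) // use an authenticated client in practice
        ctx := context.Background()

        // true on HTTP 204, false on 404; other statuses surface as err.
        isCollab, _, err := client.Repositories.IsCollaborator(ctx, "octocat", "Hello-World", "defunkt")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("collaborator:", isCollab)
    }
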
diff --git a/vendor/github.com/google/go-github/github/repos_comments.go b/vendor/github.com/google/go-github/github/repos_comments.go
index d5917e1..4830ee2 100644
--- a/vendor/github.com/google/go-github/github/repos_comments.go
+++ b/vendor/github.com/google/go-github/github/repos_comments.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -35,7 +36,7 @@ func (r RepositoryComment) String() string {
// ListComments lists all the comments for the repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/comments/#list-commit-comments-for-a-repository
-func (s *RepositoriesService) ListComments(owner, repo string, opt *ListOptions) ([]*RepositoryComment, *Response, error) {
+func (s *RepositoriesService) ListComments(ctx context.Context, owner, repo string, opt *ListOptions) ([]*RepositoryComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/comments", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -51,7 +52,7 @@ func (s *RepositoriesService) ListComments(owner, repo string, opt *ListOptions)
req.Header.Set("Accept", mediaTypeReactionsPreview)
var comments []*RepositoryComment
- resp, err := s.client.Do(req, &comments)
+ resp, err := s.client.Do(ctx, req, &comments)
if err != nil {
return nil, resp, err
}
@@ -62,7 +63,7 @@ func (s *RepositoriesService) ListComments(owner, repo string, opt *ListOptions)
// ListCommitComments lists all the comments for a given commit SHA.
//
// GitHub API docs: https://developer.github.com/v3/repos/comments/#list-comments-for-a-single-commit
-func (s *RepositoriesService) ListCommitComments(owner, repo, sha string, opt *ListOptions) ([]*RepositoryComment, *Response, error) {
+func (s *RepositoriesService) ListCommitComments(ctx context.Context, owner, repo, sha string, opt *ListOptions) ([]*RepositoryComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/commits/%v/comments", owner, repo, sha)
u, err := addOptions(u, opt)
if err != nil {
@@ -78,7 +79,7 @@ func (s *RepositoriesService) ListCommitComments(owner, repo, sha string, opt *L
req.Header.Set("Accept", mediaTypeReactionsPreview)
var comments []*RepositoryComment
- resp, err := s.client.Do(req, &comments)
+ resp, err := s.client.Do(ctx, req, &comments)
if err != nil {
return nil, resp, err
}
@@ -90,7 +91,7 @@ func (s *RepositoriesService) ListCommitComments(owner, repo, sha string, opt *L
// Note: GitHub allows for comments to be created for non-existing files and positions.
//
// GitHub API docs: https://developer.github.com/v3/repos/comments/#create-a-commit-comment
-func (s *RepositoriesService) CreateComment(owner, repo, sha string, comment *RepositoryComment) (*RepositoryComment, *Response, error) {
+func (s *RepositoriesService) CreateComment(ctx context.Context, owner, repo, sha string, comment *RepositoryComment) (*RepositoryComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/commits/%v/comments", owner, repo, sha)
req, err := s.client.NewRequest("POST", u, comment)
if err != nil {
@@ -98,7 +99,7 @@ func (s *RepositoriesService) CreateComment(owner, repo, sha string, comment *Re
}
c := new(RepositoryComment)
- resp, err := s.client.Do(req, c)
+ resp, err := s.client.Do(ctx, req, c)
if err != nil {
return nil, resp, err
}
@@ -109,7 +110,7 @@ func (s *RepositoriesService) CreateComment(owner, repo, sha string, comment *Re
// GetComment gets a single comment from a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/comments/#get-a-single-commit-comment
-func (s *RepositoriesService) GetComment(owner, repo string, id int) (*RepositoryComment, *Response, error) {
+func (s *RepositoriesService) GetComment(ctx context.Context, owner, repo string, id int) (*RepositoryComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -120,7 +121,7 @@ func (s *RepositoriesService) GetComment(owner, repo string, id int) (*Repositor
req.Header.Set("Accept", mediaTypeReactionsPreview)
c := new(RepositoryComment)
- resp, err := s.client.Do(req, c)
+ resp, err := s.client.Do(ctx, req, c)
if err != nil {
return nil, resp, err
}
@@ -131,7 +132,7 @@ func (s *RepositoriesService) GetComment(owner, repo string, id int) (*Repositor
// UpdateComment updates the body of a single comment.
//
// GitHub API docs: https://developer.github.com/v3/repos/comments/#update-a-commit-comment
-func (s *RepositoriesService) UpdateComment(owner, repo string, id int, comment *RepositoryComment) (*RepositoryComment, *Response, error) {
+func (s *RepositoriesService) UpdateComment(ctx context.Context, owner, repo string, id int, comment *RepositoryComment) (*RepositoryComment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id)
req, err := s.client.NewRequest("PATCH", u, comment)
if err != nil {
@@ -139,7 +140,7 @@ func (s *RepositoriesService) UpdateComment(owner, repo string, id int, comment
}
c := new(RepositoryComment)
- resp, err := s.client.Do(req, c)
+ resp, err := s.client.Do(ctx, req, c)
if err != nil {
return nil, resp, err
}
@@ -150,11 +151,11 @@ func (s *RepositoriesService) UpdateComment(owner, repo string, id int, comment
// DeleteComment deletes a single comment from a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/comments/#delete-a-commit-comment
-func (s *RepositoriesService) DeleteComment(owner, repo string, id int) (*Response, error) {
+func (s *RepositoriesService) DeleteComment(ctx context.Context, owner, repo string, id int) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/comments/%v", owner, repo, id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/repos_commits.go b/vendor/github.com/google/go-github/github/repos_commits.go
index 110e7b2..e516f1a 100644
--- a/vendor/github.com/google/go-github/github/repos_commits.go
+++ b/vendor/github.com/google/go-github/github/repos_commits.go
@@ -7,6 +7,7 @@ package github
import (
"bytes"
+ "context"
"fmt"
"time"
)
@@ -108,7 +109,7 @@ type CommitsListOptions struct {
// ListCommits lists the commits of a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/commits/#list
-func (s *RepositoriesService) ListCommits(owner, repo string, opt *CommitsListOptions) ([]*RepositoryCommit, *Response, error) {
+func (s *RepositoriesService) ListCommits(ctx context.Context, owner, repo string, opt *CommitsListOptions) ([]*RepositoryCommit, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/commits", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -121,7 +122,7 @@ func (s *RepositoriesService) ListCommits(owner, repo string, opt *CommitsListOp
}
var commits []*RepositoryCommit
- resp, err := s.client.Do(req, &commits)
+ resp, err := s.client.Do(ctx, req, &commits)
if err != nil {
return nil, resp, err
}
@@ -134,7 +135,7 @@ func (s *RepositoriesService) ListCommits(owner, repo string, opt *CommitsListOp
//
// GitHub API docs: https://developer.github.com/v3/repos/commits/#get-a-single-commit
// See also: https://developer.github.com//v3/git/commits/#get-a-single-commit provides the same functionality
-func (s *RepositoriesService) GetCommit(owner, repo, sha string) (*RepositoryCommit, *Response, error) {
+func (s *RepositoriesService) GetCommit(ctx context.Context, owner, repo, sha string) (*RepositoryCommit, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, sha)
req, err := s.client.NewRequest("GET", u, nil)
@@ -146,7 +147,7 @@ func (s *RepositoriesService) GetCommit(owner, repo, sha string) (*RepositoryCom
req.Header.Set("Accept", mediaTypeGitSigningPreview)
commit := new(RepositoryCommit)
- resp, err := s.client.Do(req, commit)
+ resp, err := s.client.Do(ctx, req, commit)
if err != nil {
return nil, resp, err
}
@@ -158,7 +159,7 @@ func (s *RepositoriesService) GetCommit(owner, repo, sha string) (*RepositoryCom
// supplied and no new commits have occurred, a 304 Unmodified response is returned.
//
// GitHub API docs: https://developer.github.com/v3/repos/commits/#get-the-sha-1-of-a-commit-reference
-func (s *RepositoriesService) GetCommitSHA1(owner, repo, ref, lastSHA string) (string, *Response, error) {
+func (s *RepositoriesService) GetCommitSHA1(ctx context.Context, owner, repo, ref, lastSHA string) (string, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/commits/%v", owner, repo, ref)
req, err := s.client.NewRequest("GET", u, nil)
@@ -172,7 +173,7 @@ func (s *RepositoriesService) GetCommitSHA1(owner, repo, ref, lastSHA string) (s
req.Header.Set("Accept", mediaTypeV3SHA)
var buf bytes.Buffer
- resp, err := s.client.Do(req, &buf)
+ resp, err := s.client.Do(ctx, req, &buf)
if err != nil {
return "", resp, err
}
@@ -184,7 +185,7 @@ func (s *RepositoriesService) GetCommitSHA1(owner, repo, ref, lastSHA string) (s
// todo: support media formats - https://github.com/google/go-github/issues/6
//
// GitHub API docs: https://developer.github.com/v3/repos/commits/index.html#compare-two-commits
-func (s *RepositoriesService) CompareCommits(owner, repo string, base, head string) (*CommitsComparison, *Response, error) {
+func (s *RepositoriesService) CompareCommits(ctx context.Context, owner, repo string, base, head string) (*CommitsComparison, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/compare/%v...%v", owner, repo, base, head)
req, err := s.client.NewRequest("GET", u, nil)
@@ -193,7 +194,7 @@ func (s *RepositoriesService) CompareCommits(owner, repo string, base, head stri
}
comp := new(CommitsComparison)
- resp, err := s.client.Do(req, comp)
+ resp, err := s.client.Do(ctx, req, comp)
if err != nil {
return nil, resp, err
}
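
The commit-listing methods follow the same shape, taking the context first and an options struct last. A sketch of listing recent commits on a branch (repository, branch, and page size are illustrative):

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/google/go-github/github"
    )

    func main() {
        client := github.NewClient(nil)
        ctx := context.Background()

        opt := &github.CommitsListOptions{
            SHA:         "master",
            ListOptions: github.ListOptions{PerPage: 10},
        }
        commits, _, err := client.Repositories.ListCommits(ctx, "octocat", "Hello-World", opt)
        if err != nil {
            log.Fatal(err)
        }
        for _, c := range commits {
            if c.SHA != nil {
                fmt.Println(*c.SHA)
            }
        }
    }
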
diff --git a/vendor/github.com/google/go-github/github/repos_contents.go b/vendor/github.com/google/go-github/github/repos_contents.go
index 32b1573..dfcbe33 100644
--- a/vendor/github.com/google/go-github/github/repos_contents.go
+++ b/vendor/github.com/google/go-github/github/repos_contents.go
@@ -4,11 +4,12 @@
// license that can be found in the LICENSE file.
// Repository contents API methods.
-// https://developer.github.com/v3/repos/contents/
+// GitHub API docs: https://developer.github.com/v3/repos/contents/
package github
import (
+ "context"
"encoding/base64"
"encoding/json"
"fmt"
@@ -87,7 +88,7 @@ func (r *RepositoryContent) GetContent() (string, error) {
// GetReadme gets the Readme file for the repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/contents/#get-the-readme
-func (s *RepositoriesService) GetReadme(owner, repo string, opt *RepositoryContentGetOptions) (*RepositoryContent, *Response, error) {
+func (s *RepositoriesService) GetReadme(ctx context.Context, owner, repo string, opt *RepositoryContentGetOptions) (*RepositoryContent, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/readme", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -98,7 +99,7 @@ func (s *RepositoriesService) GetReadme(owner, repo string, opt *RepositoryConte
return nil, nil, err
}
readme := new(RepositoryContent)
- resp, err := s.client.Do(req, readme)
+ resp, err := s.client.Do(ctx, req, readme)
if err != nil {
return nil, resp, err
}
@@ -109,10 +110,10 @@ func (s *RepositoriesService) GetReadme(owner, repo string, opt *RepositoryConte
// specified file. This function will work with files of any size, as opposed
// to GetContents which is limited to 1 MB files. It is the caller's
// responsibility to close the ReadCloser.
-func (s *RepositoriesService) DownloadContents(owner, repo, filepath string, opt *RepositoryContentGetOptions) (io.ReadCloser, error) {
+func (s *RepositoriesService) DownloadContents(ctx context.Context, owner, repo, filepath string, opt *RepositoryContentGetOptions) (io.ReadCloser, error) {
dir := path.Dir(filepath)
filename := path.Base(filepath)
- _, dirContents, _, err := s.GetContents(owner, repo, dir, opt)
+ _, dirContents, _, err := s.GetContents(ctx, owner, repo, dir, opt)
if err != nil {
return nil, err
}
@@ -139,7 +140,7 @@ func (s *RepositoriesService) DownloadContents(owner, repo, filepath string, opt
// value and the other will be nil.
//
// GitHub API docs: https://developer.github.com/v3/repos/contents/#get-contents
-func (s *RepositoriesService) GetContents(owner, repo, path string, opt *RepositoryContentGetOptions) (fileContent *RepositoryContent, directoryContent []*RepositoryContent, resp *Response, err error) {
+func (s *RepositoriesService) GetContents(ctx context.Context, owner, repo, path string, opt *RepositoryContentGetOptions) (fileContent *RepositoryContent, directoryContent []*RepositoryContent, resp *Response, err error) {
escapedPath := (&url.URL{Path: path}).String()
u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, escapedPath)
u, err = addOptions(u, opt)
@@ -151,17 +152,17 @@ func (s *RepositoriesService) GetContents(owner, repo, path string, opt *Reposit
return nil, nil, nil, err
}
var rawJSON json.RawMessage
- resp, err = s.client.Do(req, &rawJSON)
+ resp, err = s.client.Do(ctx, req, &rawJSON)
if err != nil {
return nil, nil, resp, err
}
fileUnmarshalError := json.Unmarshal(rawJSON, &fileContent)
if fileUnmarshalError == nil {
- return fileContent, nil, resp, fileUnmarshalError
+ return fileContent, nil, resp, nil
}
directoryUnmarshalError := json.Unmarshal(rawJSON, &directoryContent)
if directoryUnmarshalError == nil {
- return nil, directoryContent, resp, directoryUnmarshalError
+ return nil, directoryContent, resp, nil
}
return nil, nil, resp, fmt.Errorf("unmarshalling failed for both file and directory content: %s and %s ", fileUnmarshalError, directoryUnmarshalError)
}
@@ -170,14 +171,14 @@ func (s *RepositoriesService) GetContents(owner, repo, path string, opt *Reposit
// the commit and file metadata.
//
// GitHub API docs: https://developer.github.com/v3/repos/contents/#create-a-file
-func (s *RepositoriesService) CreateFile(owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) {
+func (s *RepositoriesService) CreateFile(ctx context.Context, owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path)
req, err := s.client.NewRequest("PUT", u, opt)
if err != nil {
return nil, nil, err
}
createResponse := new(RepositoryContentResponse)
- resp, err := s.client.Do(req, createResponse)
+ resp, err := s.client.Do(ctx, req, createResponse)
if err != nil {
return nil, resp, err
}
@@ -188,14 +189,14 @@ func (s *RepositoriesService) CreateFile(owner, repo, path string, opt *Reposito
// commit and file metadata. Requires the blob SHA of the file being updated.
//
// GitHub API docs: https://developer.github.com/v3/repos/contents/#update-a-file
-func (s *RepositoriesService) UpdateFile(owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) {
+func (s *RepositoriesService) UpdateFile(ctx context.Context, owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path)
req, err := s.client.NewRequest("PUT", u, opt)
if err != nil {
return nil, nil, err
}
updateResponse := new(RepositoryContentResponse)
- resp, err := s.client.Do(req, updateResponse)
+ resp, err := s.client.Do(ctx, req, updateResponse)
if err != nil {
return nil, resp, err
}
@@ -206,14 +207,14 @@ func (s *RepositoriesService) UpdateFile(owner, repo, path string, opt *Reposito
// Requires the blob SHA of the file to be deleted.
//
// GitHub API docs: https://developer.github.com/v3/repos/contents/#delete-a-file
-func (s *RepositoriesService) DeleteFile(owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) {
+func (s *RepositoriesService) DeleteFile(ctx context.Context, owner, repo, path string, opt *RepositoryContentFileOptions) (*RepositoryContentResponse, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/contents/%s", owner, repo, path)
req, err := s.client.NewRequest("DELETE", u, opt)
if err != nil {
return nil, nil, err
}
deleteResponse := new(RepositoryContentResponse)
- resp, err := s.client.Do(req, deleteResponse)
+ resp, err := s.client.Do(ctx, req, deleteResponse)
if err != nil {
return nil, resp, err
}
@@ -236,7 +237,7 @@ const (
// or github.Zipball constant.
//
// GitHub API docs: https://developer.github.com/v3/repos/contents/#get-archive-link
-func (s *RepositoriesService) GetArchiveLink(owner, repo string, archiveformat archiveFormat, opt *RepositoryContentGetOptions) (*url.URL, *Response, error) {
+func (s *RepositoriesService) GetArchiveLink(ctx context.Context, owner, repo string, archiveformat archiveFormat, opt *RepositoryContentGetOptions) (*url.URL, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/%s", owner, repo, archiveformat)
if opt != nil && opt.Ref != "" {
u += fmt.Sprintf("/%s", opt.Ref)
@@ -247,6 +248,7 @@ func (s *RepositoriesService) GetArchiveLink(owner, repo string, archiveformat a
}
var resp *http.Response
// Use http.DefaultTransport if no custom Transport is configured
+ ctx, req = withContext(ctx, req)
if s.client.client.Transport == nil {
resp, err = http.DefaultTransport.RoundTrip(req)
} else {
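
Note the behavioral cleanup in GetContents above: on a successful unmarshal it now returns an explicit nil error, and exactly one of the file and directory results is non-nil. A minimal sketch of handling both cases from caller code (the repository and path are placeholders):

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/google/go-github/github"
    )

    func main() {
        client := github.NewClient(nil)
        ctx := context.Background()

        // Exactly one of fileContent and directoryContent is non-nil on success.
        fileContent, directoryContent, _, err := client.Repositories.GetContents(ctx, "octocat", "Hello-World", "README", nil)
        if err != nil {
            log.Fatal(err)
        }
        switch {
        case fileContent != nil:
            text, err := fileContent.GetContent() // decodes the returned payload
            if err != nil {
                log.Fatal(err)
            }
            fmt.Println(text)
        case directoryContent != nil:
            for _, entry := range directoryContent {
                if entry.Name != nil {
                    fmt.Println(*entry.Name)
                }
            }
        }
    }
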
diff --git a/vendor/github.com/google/go-github/github/repos_deployments.go b/vendor/github.com/google/go-github/github/repos_deployments.go
index ad931af..9054ca9 100644
--- a/vendor/github.com/google/go-github/github/repos_deployments.go
+++ b/vendor/github.com/google/go-github/github/repos_deployments.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"encoding/json"
"fmt"
)
@@ -61,7 +62,7 @@ type DeploymentsListOptions struct {
// ListDeployments lists the deployments of a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/deployments/#list-deployments
-func (s *RepositoriesService) ListDeployments(owner, repo string, opt *DeploymentsListOptions) ([]*Deployment, *Response, error) {
+func (s *RepositoriesService) ListDeployments(ctx context.Context, owner, repo string, opt *DeploymentsListOptions) ([]*Deployment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/deployments", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -74,7 +75,7 @@ func (s *RepositoriesService) ListDeployments(owner, repo string, opt *Deploymen
}
var deployments []*Deployment
- resp, err := s.client.Do(req, &deployments)
+ resp, err := s.client.Do(ctx, req, &deployments)
if err != nil {
return nil, resp, err
}
@@ -85,7 +86,7 @@ func (s *RepositoriesService) ListDeployments(owner, repo string, opt *Deploymen
// GetDeployment returns a single deployment of a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/deployments/#get-a-single-deployment
-func (s *RepositoriesService) GetDeployment(owner, repo string, deploymentID int) (*Deployment, *Response, error) {
+func (s *RepositoriesService) GetDeployment(ctx context.Context, owner, repo string, deploymentID int) (*Deployment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/deployments/%v", owner, repo, deploymentID)
req, err := s.client.NewRequest("GET", u, nil)
@@ -94,7 +95,7 @@ func (s *RepositoriesService) GetDeployment(owner, repo string, deploymentID int
}
deployment := new(Deployment)
- resp, err := s.client.Do(req, deployment)
+ resp, err := s.client.Do(ctx, req, deployment)
if err != nil {
return nil, resp, err
}
@@ -105,7 +106,7 @@ func (s *RepositoriesService) GetDeployment(owner, repo string, deploymentID int
// CreateDeployment creates a new deployment for a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/deployments/#create-a-deployment
-func (s *RepositoriesService) CreateDeployment(owner, repo string, request *DeploymentRequest) (*Deployment, *Response, error) {
+func (s *RepositoriesService) CreateDeployment(ctx context.Context, owner, repo string, request *DeploymentRequest) (*Deployment, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/deployments", owner, repo)
req, err := s.client.NewRequest("POST", u, request)
@@ -117,7 +118,7 @@ func (s *RepositoriesService) CreateDeployment(owner, repo string, request *Depl
req.Header.Set("Accept", mediaTypeDeploymentStatusPreview)
d := new(Deployment)
- resp, err := s.client.Do(req, d)
+ resp, err := s.client.Do(ctx, req, d)
if err != nil {
return nil, resp, err
}
@@ -153,7 +154,7 @@ type DeploymentStatusRequest struct {
// ListDeploymentStatuses lists the statuses of a given deployment of a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/deployments/#list-deployment-statuses
-func (s *RepositoriesService) ListDeploymentStatuses(owner, repo string, deployment int, opt *ListOptions) ([]*DeploymentStatus, *Response, error) {
+func (s *RepositoriesService) ListDeploymentStatuses(ctx context.Context, owner, repo string, deployment int, opt *ListOptions) ([]*DeploymentStatus, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses", owner, repo, deployment)
u, err := addOptions(u, opt)
if err != nil {
@@ -166,7 +167,7 @@ func (s *RepositoriesService) ListDeploymentStatuses(owner, repo string, deploym
}
var statuses []*DeploymentStatus
- resp, err := s.client.Do(req, &statuses)
+ resp, err := s.client.Do(ctx, req, &statuses)
if err != nil {
return nil, resp, err
}
@@ -177,7 +178,7 @@ func (s *RepositoriesService) ListDeploymentStatuses(owner, repo string, deploym
// GetDeploymentStatus returns a single deployment status of a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/deployments/#get-a-single-deployment-status
-func (s *RepositoriesService) GetDeploymentStatus(owner, repo string, deploymentID, deploymentStatusID int) (*DeploymentStatus, *Response, error) {
+func (s *RepositoriesService) GetDeploymentStatus(ctx context.Context, owner, repo string, deploymentID, deploymentStatusID int) (*DeploymentStatus, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses/%v", owner, repo, deploymentID, deploymentStatusID)
req, err := s.client.NewRequest("GET", u, nil)
@@ -189,7 +190,7 @@ func (s *RepositoriesService) GetDeploymentStatus(owner, repo string, deployment
req.Header.Set("Accept", mediaTypeDeploymentStatusPreview)
d := new(DeploymentStatus)
- resp, err := s.client.Do(req, d)
+ resp, err := s.client.Do(ctx, req, d)
if err != nil {
return nil, resp, err
}
@@ -200,7 +201,7 @@ func (s *RepositoriesService) GetDeploymentStatus(owner, repo string, deployment
// CreateDeploymentStatus creates a new status for a deployment.
//
// GitHub API docs: https://developer.github.com/v3/repos/deployments/#create-a-deployment-status
-func (s *RepositoriesService) CreateDeploymentStatus(owner, repo string, deployment int, request *DeploymentStatusRequest) (*DeploymentStatus, *Response, error) {
+func (s *RepositoriesService) CreateDeploymentStatus(ctx context.Context, owner, repo string, deployment int, request *DeploymentStatusRequest) (*DeploymentStatus, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/deployments/%v/statuses", owner, repo, deployment)
req, err := s.client.NewRequest("POST", u, request)
@@ -212,7 +213,7 @@ func (s *RepositoriesService) CreateDeploymentStatus(owner, repo string, deploym
req.Header.Set("Accept", mediaTypeDeploymentStatusPreview)
d := new(DeploymentStatus)
- resp, err := s.client.Do(req, d)
+ resp, err := s.client.Do(ctx, req, d)
if err != nil {
return nil, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/repos_forks.go b/vendor/github.com/google/go-github/github/repos_forks.go
index fe98a6c..6b5e4ea 100644
--- a/vendor/github.com/google/go-github/github/repos_forks.go
+++ b/vendor/github.com/google/go-github/github/repos_forks.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// RepositoryListForksOptions specifies the optional parameters to the
// RepositoriesService.ListForks method.
@@ -20,7 +23,7 @@ type RepositoryListForksOptions struct {
// ListForks lists the forks of the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/forks/#list-forks
-func (s *RepositoriesService) ListForks(owner, repo string, opt *RepositoryListForksOptions) ([]*Repository, *Response, error) {
+func (s *RepositoriesService) ListForks(ctx context.Context, owner, repo string, opt *RepositoryListForksOptions) ([]*Repository, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/forks", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -33,7 +36,7 @@ func (s *RepositoriesService) ListForks(owner, repo string, opt *RepositoryListF
}
var repos []*Repository
- resp, err := s.client.Do(req, &repos)
+ resp, err := s.client.Do(ctx, req, &repos)
if err != nil {
return nil, resp, err
}
@@ -57,7 +60,7 @@ type RepositoryCreateForkOptions struct {
// in a successful request.
//
// GitHub API docs: https://developer.github.com/v3/repos/forks/#create-a-fork
-func (s *RepositoriesService) CreateFork(owner, repo string, opt *RepositoryCreateForkOptions) (*Repository, *Response, error) {
+func (s *RepositoriesService) CreateFork(ctx context.Context, owner, repo string, opt *RepositoryCreateForkOptions) (*Repository, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/forks", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -70,7 +73,7 @@ func (s *RepositoriesService) CreateFork(owner, repo string, opt *RepositoryCrea
}
fork := new(Repository)
- resp, err := s.client.Do(req, fork)
+ resp, err := s.client.Do(ctx, req, fork)
if err != nil {
return nil, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/repos_hooks.go b/vendor/github.com/google/go-github/github/repos_hooks.go
index 818286b..67ce96a 100644
--- a/vendor/github.com/google/go-github/github/repos_hooks.go
+++ b/vendor/github.com/google/go-github/github/repos_hooks.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -86,7 +87,7 @@ func (h Hook) String() string {
// Name and Config are required fields.
//
// GitHub API docs: https://developer.github.com/v3/repos/hooks/#create-a-hook
-func (s *RepositoriesService) CreateHook(owner, repo string, hook *Hook) (*Hook, *Response, error) {
+func (s *RepositoriesService) CreateHook(ctx context.Context, owner, repo string, hook *Hook) (*Hook, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo)
req, err := s.client.NewRequest("POST", u, hook)
if err != nil {
@@ -94,7 +95,7 @@ func (s *RepositoriesService) CreateHook(owner, repo string, hook *Hook) (*Hook,
}
h := new(Hook)
- resp, err := s.client.Do(req, h)
+ resp, err := s.client.Do(ctx, req, h)
if err != nil {
return nil, resp, err
}
@@ -105,7 +106,7 @@ func (s *RepositoriesService) CreateHook(owner, repo string, hook *Hook) (*Hook,
// ListHooks lists all Hooks for the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/hooks/#list
-func (s *RepositoriesService) ListHooks(owner, repo string, opt *ListOptions) ([]*Hook, *Response, error) {
+func (s *RepositoriesService) ListHooks(ctx context.Context, owner, repo string, opt *ListOptions) ([]*Hook, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/hooks", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -118,7 +119,7 @@ func (s *RepositoriesService) ListHooks(owner, repo string, opt *ListOptions) ([
}
var hooks []*Hook
- resp, err := s.client.Do(req, &hooks)
+ resp, err := s.client.Do(ctx, req, &hooks)
if err != nil {
return nil, resp, err
}
@@ -129,63 +130,63 @@ func (s *RepositoriesService) ListHooks(owner, repo string, opt *ListOptions) ([
// GetHook returns a single specified Hook.
//
// GitHub API docs: https://developer.github.com/v3/repos/hooks/#get-single-hook
-func (s *RepositoriesService) GetHook(owner, repo string, id int) (*Hook, *Response, error) {
+func (s *RepositoriesService) GetHook(ctx context.Context, owner, repo string, id int) (*Hook, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
hook := new(Hook)
- resp, err := s.client.Do(req, hook)
+ resp, err := s.client.Do(ctx, req, hook)
return hook, resp, err
}
// EditHook updates a specified Hook.
//
// GitHub API docs: https://developer.github.com/v3/repos/hooks/#edit-a-hook
-func (s *RepositoriesService) EditHook(owner, repo string, id int, hook *Hook) (*Hook, *Response, error) {
+func (s *RepositoriesService) EditHook(ctx context.Context, owner, repo string, id int, hook *Hook) (*Hook, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id)
req, err := s.client.NewRequest("PATCH", u, hook)
if err != nil {
return nil, nil, err
}
h := new(Hook)
- resp, err := s.client.Do(req, h)
+ resp, err := s.client.Do(ctx, req, h)
return h, resp, err
}
// DeleteHook deletes a specified Hook.
//
// GitHub API docs: https://developer.github.com/v3/repos/hooks/#delete-a-hook
-func (s *RepositoriesService) DeleteHook(owner, repo string, id int) (*Response, error) {
+func (s *RepositoriesService) DeleteHook(ctx context.Context, owner, repo string, id int) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/hooks/%d", owner, repo, id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// PingHook triggers a 'ping' event to be sent to the Hook.
//
// GitHub API docs: https://developer.github.com/v3/repos/hooks/#ping-a-hook
-func (s *RepositoriesService) PingHook(owner, repo string, id int) (*Response, error) {
+func (s *RepositoriesService) PingHook(ctx context.Context, owner, repo string, id int) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/hooks/%d/pings", owner, repo, id)
req, err := s.client.NewRequest("POST", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// TestHook triggers a test Hook by GitHub.
//
// GitHub API docs: https://developer.github.com/v3/repos/hooks/#test-a-push-hook
-func (s *RepositoriesService) TestHook(owner, repo string, id int) (*Response, error) {
+func (s *RepositoriesService) TestHook(ctx context.Context, owner, repo string, id int) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/hooks/%d/tests", owner, repo, id)
req, err := s.client.NewRequest("POST", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
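
For reference, a minimal caller-side sketch of the hook methods under their new context-aware signatures. github.NewClient, the Repositories field on Client, and ListOptions.PerPage are assumed from the rest of the package; only the method signatures come from the hunks above.

package example

import (
	"context"
	"fmt"
	"time"

	"github.com/google/go-github/github"
)

// listHooks cancels the underlying HTTP request if it takes longer than ten
// seconds, which is the point of threading context.Context through client.Do.
func listHooks(owner, repo string) error {
	client := github.NewClient(nil) // unauthenticated client (assumed constructor)

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	hooks, _, err := client.Repositories.ListHooks(ctx, owner, repo, &github.ListOptions{PerPage: 50})
	if err != nil {
		return err
	}
	fmt.Printf("%d hooks on %s/%s\n", len(hooks), owner, repo)
	return nil
}
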
diff --git a/vendor/github.com/google/go-github/github/repos_invitations.go b/vendor/github.com/google/go-github/github/repos_invitations.go
index e80b946..a803a12 100644
--- a/vendor/github.com/google/go-github/github/repos_invitations.go
+++ b/vendor/github.com/google/go-github/github/repos_invitations.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// RepositoryInvitation represents an invitation to collaborate on a repo.
type RepositoryInvitation struct {
@@ -25,8 +28,8 @@ type RepositoryInvitation struct {
// ListInvitations lists all currently-open repository invitations.
//
// GitHub API docs: https://developer.github.com/v3/repos/invitations/#list-invitations-for-a-repository
-func (s *RepositoriesService) ListInvitations(repoID int, opt *ListOptions) ([]*RepositoryInvitation, *Response, error) {
- u := fmt.Sprintf("repositories/%v/invitations", repoID)
+func (s *RepositoriesService) ListInvitations(ctx context.Context, owner, repo string, opt *ListOptions) ([]*RepositoryInvitation, *Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/invitations", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
return nil, nil, err
@@ -41,7 +44,7 @@ func (s *RepositoriesService) ListInvitations(repoID int, opt *ListOptions) ([]*
req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
invites := []*RepositoryInvitation{}
- resp, err := s.client.Do(req, &invites)
+ resp, err := s.client.Do(ctx, req, &invites)
if err != nil {
return nil, resp, err
}
@@ -52,8 +55,8 @@ func (s *RepositoriesService) ListInvitations(repoID int, opt *ListOptions) ([]*
// DeleteInvitation deletes a repository invitation.
//
// GitHub API docs: https://developer.github.com/v3/repos/invitations/#delete-a-repository-invitation
-func (s *RepositoriesService) DeleteInvitation(repoID, invitationID int) (*Response, error) {
- u := fmt.Sprintf("repositories/%v/invitations/%v", repoID, invitationID)
+func (s *RepositoriesService) DeleteInvitation(ctx context.Context, owner, repo string, invitationID int) (*Response, error) {
+ u := fmt.Sprintf("repos/%v/%v/invitations/%v", owner, repo, invitationID)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
@@ -62,7 +65,7 @@ func (s *RepositoriesService) DeleteInvitation(repoID, invitationID int) (*Respo
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// UpdateInvitation updates the permissions associated with a repository
@@ -72,11 +75,11 @@ func (s *RepositoriesService) DeleteInvitation(repoID, invitationID int) (*Respo
// on the repository. Possible values are: "read", "write", "admin".
//
// GitHub API docs: https://developer.github.com/v3/repos/invitations/#update-a-repository-invitation
-func (s *RepositoriesService) UpdateInvitation(repoID, invitationID int, permissions string) (*RepositoryInvitation, *Response, error) {
+func (s *RepositoriesService) UpdateInvitation(ctx context.Context, owner, repo string, invitationID int, permissions string) (*RepositoryInvitation, *Response, error) {
opts := &struct {
Permissions string `json:"permissions"`
}{Permissions: permissions}
- u := fmt.Sprintf("repositories/%v/invitations/%v", repoID, invitationID)
+ u := fmt.Sprintf("repos/%v/%v/invitations/%v", owner, repo, invitationID)
req, err := s.client.NewRequest("PATCH", u, opts)
if err != nil {
return nil, nil, err
@@ -86,6 +89,6 @@ func (s *RepositoriesService) UpdateInvitation(repoID, invitationID int, permiss
req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
invite := &RepositoryInvitation{}
- resp, err := s.client.Do(req, invite)
+ resp, err := s.client.Do(ctx, req, invite)
return invite, resp, err
}
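
Besides gaining a context argument, the invitation methods above switch from a numeric repository ID to the owner/name form. A sketch of the new call shape; the ID field on RepositoryInvitation is an assumption, since the struct body is not visible in this hunk.

package example

import (
	"context"

	"github.com/google/go-github/github"
)

// deleteOpenInvitations lists a repository's open invitations and deletes each
// one, using the repos/:owner/:repo/invitations paths introduced above.
func deleteOpenInvitations(ctx context.Context, client *github.Client, owner, repo string) error {
	invites, _, err := client.Repositories.ListInvitations(ctx, owner, repo, nil)
	if err != nil {
		return err
	}
	for _, inv := range invites {
		if inv.ID == nil {
			continue
		}
		if _, err := client.Repositories.DeleteInvitation(ctx, owner, repo, *inv.ID); err != nil {
			return err
		}
	}
	return nil
}
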
diff --git a/vendor/github.com/google/go-github/github/repos_keys.go b/vendor/github.com/google/go-github/github/repos_keys.go
index 1ac35da..f5a8658 100644
--- a/vendor/github.com/google/go-github/github/repos_keys.go
+++ b/vendor/github.com/google/go-github/github/repos_keys.go
@@ -5,14 +5,17 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// The Key type is defined in users_keys.go
// ListKeys lists the deploy keys for a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/keys/#list
-func (s *RepositoriesService) ListKeys(owner string, repo string, opt *ListOptions) ([]*Key, *Response, error) {
+func (s *RepositoriesService) ListKeys(ctx context.Context, owner string, repo string, opt *ListOptions) ([]*Key, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/keys", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -25,7 +28,7 @@ func (s *RepositoriesService) ListKeys(owner string, repo string, opt *ListOptio
}
var keys []*Key
- resp, err := s.client.Do(req, &keys)
+ resp, err := s.client.Do(ctx, req, &keys)
if err != nil {
return nil, resp, err
}
@@ -36,7 +39,7 @@ func (s *RepositoriesService) ListKeys(owner string, repo string, opt *ListOptio
// GetKey fetches a single deploy key.
//
// GitHub API docs: https://developer.github.com/v3/repos/keys/#get
-func (s *RepositoriesService) GetKey(owner string, repo string, id int) (*Key, *Response, error) {
+func (s *RepositoriesService) GetKey(ctx context.Context, owner string, repo string, id int) (*Key, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id)
req, err := s.client.NewRequest("GET", u, nil)
@@ -45,7 +48,7 @@ func (s *RepositoriesService) GetKey(owner string, repo string, id int) (*Key, *
}
key := new(Key)
- resp, err := s.client.Do(req, key)
+ resp, err := s.client.Do(ctx, req, key)
if err != nil {
return nil, resp, err
}
@@ -56,7 +59,7 @@ func (s *RepositoriesService) GetKey(owner string, repo string, id int) (*Key, *
// CreateKey adds a deploy key for a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/keys/#create
-func (s *RepositoriesService) CreateKey(owner string, repo string, key *Key) (*Key, *Response, error) {
+func (s *RepositoriesService) CreateKey(ctx context.Context, owner string, repo string, key *Key) (*Key, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/keys", owner, repo)
req, err := s.client.NewRequest("POST", u, key)
@@ -65,7 +68,7 @@ func (s *RepositoriesService) CreateKey(owner string, repo string, key *Key) (*K
}
k := new(Key)
- resp, err := s.client.Do(req, k)
+ resp, err := s.client.Do(ctx, req, k)
if err != nil {
return nil, resp, err
}
@@ -76,7 +79,7 @@ func (s *RepositoriesService) CreateKey(owner string, repo string, key *Key) (*K
// EditKey edits a deploy key.
//
// GitHub API docs: https://developer.github.com/v3/repos/keys/#edit
-func (s *RepositoriesService) EditKey(owner string, repo string, id int, key *Key) (*Key, *Response, error) {
+func (s *RepositoriesService) EditKey(ctx context.Context, owner string, repo string, id int, key *Key) (*Key, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id)
req, err := s.client.NewRequest("PATCH", u, key)
@@ -85,7 +88,7 @@ func (s *RepositoriesService) EditKey(owner string, repo string, id int, key *Ke
}
k := new(Key)
- resp, err := s.client.Do(req, k)
+ resp, err := s.client.Do(ctx, req, k)
if err != nil {
return nil, resp, err
}
@@ -96,7 +99,7 @@ func (s *RepositoriesService) EditKey(owner string, repo string, id int, key *Ke
// DeleteKey deletes a deploy key.
//
// GitHub API docs: https://developer.github.com/v3/repos/keys/#delete
-func (s *RepositoriesService) DeleteKey(owner string, repo string, id int) (*Response, error) {
+func (s *RepositoriesService) DeleteKey(ctx context.Context, owner string, repo string, id int) (*Response, error) {
u := fmt.Sprintf("repos/%v/%v/keys/%v", owner, repo, id)
req, err := s.client.NewRequest("DELETE", u, nil)
@@ -104,5 +107,5 @@ func (s *RepositoriesService) DeleteKey(owner string, repo string, id int) (*Res
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/repos_merging.go b/vendor/github.com/google/go-github/github/repos_merging.go
index f9aefb4..04383c1 100644
--- a/vendor/github.com/google/go-github/github/repos_merging.go
+++ b/vendor/github.com/google/go-github/github/repos_merging.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
)
@@ -20,7 +21,7 @@ type RepositoryMergeRequest struct {
// Merge a branch in the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/merging/#perform-a-merge
-func (s *RepositoriesService) Merge(owner, repo string, request *RepositoryMergeRequest) (*RepositoryCommit, *Response, error) {
+func (s *RepositoriesService) Merge(ctx context.Context, owner, repo string, request *RepositoryMergeRequest) (*RepositoryCommit, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/merges", owner, repo)
req, err := s.client.NewRequest("POST", u, request)
if err != nil {
@@ -28,7 +29,7 @@ func (s *RepositoriesService) Merge(owner, repo string, request *RepositoryMerge
}
commit := new(RepositoryCommit)
- resp, err := s.client.Do(req, commit)
+ resp, err := s.client.Do(ctx, req, commit)
if err != nil {
return nil, resp, err
}
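
Merge now takes a context as well. A small sketch of building and sending a RepositoryMergeRequest; the Base, Head and CommitMessage fields, the github.String helper, and RepositoryCommit.SHA are assumptions drawn from the wider package, not from this hunk.

package example

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

// mergeBranch merges head into base through the context-aware Merge above.
func mergeBranch(ctx context.Context, client *github.Client, owner, repo, base, head string) error {
	req := &github.RepositoryMergeRequest{
		Base:          github.String(base),
		Head:          github.String(head),
		CommitMessage: github.String(fmt.Sprintf("Merge %s into %s", head, base)),
	}
	commit, _, err := client.Repositories.Merge(ctx, owner, repo, req)
	if err != nil {
		return err
	}
	if commit.SHA != nil {
		fmt.Println("merge commit:", *commit.SHA)
	}
	return nil
}
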
diff --git a/vendor/github.com/google/go-github/github/repos_pages.go b/vendor/github.com/google/go-github/github/repos_pages.go
index e4bb6d8..3d19b43 100644
--- a/vendor/github.com/google/go-github/github/repos_pages.go
+++ b/vendor/github.com/google/go-github/github/repos_pages.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// Pages represents a GitHub Pages site configuration.
type Pages struct {
@@ -36,7 +39,7 @@ type PagesBuild struct {
// GetPagesInfo fetches information about a GitHub Pages site.
//
// GitHub API docs: https://developer.github.com/v3/repos/pages/#get-information-about-a-pages-site
-func (s *RepositoriesService) GetPagesInfo(owner, repo string) (*Pages, *Response, error) {
+func (s *RepositoriesService) GetPagesInfo(ctx context.Context, owner, repo string) (*Pages, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pages", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -47,7 +50,7 @@ func (s *RepositoriesService) GetPagesInfo(owner, repo string) (*Pages, *Respons
req.Header.Set("Accept", mediaTypePagesPreview)
site := new(Pages)
- resp, err := s.client.Do(req, site)
+ resp, err := s.client.Do(ctx, req, site)
if err != nil {
return nil, resp, err
}
@@ -58,7 +61,7 @@ func (s *RepositoriesService) GetPagesInfo(owner, repo string) (*Pages, *Respons
// ListPagesBuilds lists the builds for a GitHub Pages site.
//
// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-pages-builds
-func (s *RepositoriesService) ListPagesBuilds(owner, repo string) ([]*PagesBuild, *Response, error) {
+func (s *RepositoriesService) ListPagesBuilds(ctx context.Context, owner, repo string) ([]*PagesBuild, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -66,7 +69,7 @@ func (s *RepositoriesService) ListPagesBuilds(owner, repo string) ([]*PagesBuild
}
var pages []*PagesBuild
- resp, err := s.client.Do(req, &pages)
+ resp, err := s.client.Do(ctx, req, &pages)
if err != nil {
return nil, resp, err
}
@@ -77,7 +80,7 @@ func (s *RepositoriesService) ListPagesBuilds(owner, repo string) ([]*PagesBuild
// GetLatestPagesBuild fetches the latest build information for a GitHub pages site.
//
// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-latest-pages-build
-func (s *RepositoriesService) GetLatestPagesBuild(owner, repo string) (*PagesBuild, *Response, error) {
+func (s *RepositoriesService) GetLatestPagesBuild(ctx context.Context, owner, repo string) (*PagesBuild, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pages/builds/latest", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -85,7 +88,7 @@ func (s *RepositoriesService) GetLatestPagesBuild(owner, repo string) (*PagesBui
}
build := new(PagesBuild)
- resp, err := s.client.Do(req, build)
+ resp, err := s.client.Do(ctx, req, build)
if err != nil {
return nil, resp, err
}
@@ -96,7 +99,7 @@ func (s *RepositoriesService) GetLatestPagesBuild(owner, repo string) (*PagesBui
// GetPageBuild fetches the specific build information for a GitHub pages site.
//
// GitHub API docs: https://developer.github.com/v3/repos/pages/#list-a-specific-pages-build
-func (s *RepositoriesService) GetPageBuild(owner, repo string, id int) (*PagesBuild, *Response, error) {
+func (s *RepositoriesService) GetPageBuild(ctx context.Context, owner, repo string, id int) (*PagesBuild, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pages/builds/%v", owner, repo, id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -104,7 +107,7 @@ func (s *RepositoriesService) GetPageBuild(owner, repo string, id int) (*PagesBu
}
build := new(PagesBuild)
- resp, err := s.client.Do(req, build)
+ resp, err := s.client.Do(ctx, req, build)
if err != nil {
return nil, resp, err
}
@@ -115,7 +118,7 @@ func (s *RepositoriesService) GetPageBuild(owner, repo string, id int) (*PagesBu
// RequestPageBuild requests a build of a GitHub Pages site without needing to push a new commit.
//
// GitHub API docs: https://developer.github.com/v3/repos/pages/#request-a-page-build
-func (s *RepositoriesService) RequestPageBuild(owner, repo string) (*PagesBuild, *Response, error) {
+func (s *RepositoriesService) RequestPageBuild(ctx context.Context, owner, repo string) (*PagesBuild, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/pages/builds", owner, repo)
req, err := s.client.NewRequest("POST", u, nil)
if err != nil {
@@ -126,7 +129,7 @@ func (s *RepositoriesService) RequestPageBuild(owner, repo string) (*PagesBuild,
req.Header.Set("Accept", mediaTypePagesPreview)
build := new(PagesBuild)
- resp, err := s.client.Do(req, build)
+ resp, err := s.client.Do(ctx, req, build)
if err != nil {
return nil, resp, err
}
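
The Pages endpoints follow the same pattern. A sketch that requests a build and reads the latest build status back once; the Status field on PagesBuild is assumed, as the struct body is not part of this hunk.

package example

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

// rebuildPages queues a Pages build without pushing a commit, then fetches the
// most recent build to inspect its state.
func rebuildPages(ctx context.Context, client *github.Client, owner, repo string) error {
	if _, _, err := client.Repositories.RequestPageBuild(ctx, owner, repo); err != nil {
		return err
	}
	build, _, err := client.Repositories.GetLatestPagesBuild(ctx, owner, repo)
	if err != nil {
		return err
	}
	if build.Status != nil {
		fmt.Println("latest pages build:", *build.Status)
	}
	return nil
}
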
diff --git a/vendor/github.com/google/go-github/github/repos_projects.go b/vendor/github.com/google/go-github/github/repos_projects.go
index dc9227c..770ffc7 100644
--- a/vendor/github.com/google/go-github/github/repos_projects.go
+++ b/vendor/github.com/google/go-github/github/repos_projects.go
@@ -1,16 +1,28 @@
-// Copyright 2016 The go-github AUTHORS. All rights reserved.
+// Copyright 2017 The go-github AUTHORS. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
+
+// ProjectListOptions specifies the optional parameters to the
+// OrganizationsService.ListProjects and RepositoriesService.ListProjects methods.
+type ProjectListOptions struct {
+ // Indicates the state of the projects to return. Can be either open, closed, or all. Default: open
+ State string `url:"state,omitempty"`
+
+ ListOptions
+}
// ListProjects lists the projects for a repo.
//
// GitHub API docs: https://developer.github.com/v3/projects/#list-repository-projects
-func (s *RepositoriesService) ListProjects(owner, repo string, opt *ListOptions) ([]*Project, *Response, error) {
+func (s *RepositoriesService) ListProjects(ctx context.Context, owner, repo string, opt *ProjectListOptions) ([]*Project, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/projects", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -25,8 +37,8 @@ func (s *RepositoriesService) ListProjects(owner, repo string, opt *ListOptions)
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeProjectsPreview)
- projects := []*Project{}
- resp, err := s.client.Do(req, &projects)
+ var projects []*Project
+ resp, err := s.client.Do(ctx, req, &projects)
if err != nil {
return nil, resp, err
}
@@ -37,7 +49,7 @@ func (s *RepositoriesService) ListProjects(owner, repo string, opt *ListOptions)
// CreateProject creates a GitHub Project for the specified repository.
//
// GitHub API docs: https://developer.github.com/v3/projects/#create-a-repository-project
-func (s *RepositoriesService) CreateProject(owner, repo string, opt *ProjectOptions) (*Project, *Response, error) {
+func (s *RepositoriesService) CreateProject(ctx context.Context, owner, repo string, opt *ProjectOptions) (*Project, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/projects", owner, repo)
req, err := s.client.NewRequest("POST", u, opt)
if err != nil {
@@ -48,7 +60,7 @@ func (s *RepositoriesService) CreateProject(owner, repo string, opt *ProjectOpti
req.Header.Set("Accept", mediaTypeProjectsPreview)
project := &Project{}
- resp, err := s.client.Do(req, project)
+ resp, err := s.client.Do(ctx, req, project)
if err != nil {
return nil, resp, err
}
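
This hunk also introduces ProjectListOptions, so callers can filter repository projects by state. A sketch using it; the Name field on Project and ListOptions.PerPage are assumptions, and the preview Accept header is added by the library itself.

package example

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

// listAllProjects asks for open and closed projects in one call via the new
// State filter, paging 50 entries at a time through the embedded ListOptions.
func listAllProjects(ctx context.Context, client *github.Client, owner, repo string) error {
	opt := &github.ProjectListOptions{
		State:       "all",
		ListOptions: github.ListOptions{PerPage: 50},
	}
	projects, _, err := client.Repositories.ListProjects(ctx, owner, repo, opt)
	if err != nil {
		return err
	}
	for _, p := range projects {
		if p.Name != nil {
			fmt.Println(*p.Name)
		}
	}
	return nil
}
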
diff --git a/vendor/github.com/google/go-github/github/repos_releases.go b/vendor/github.com/google/go-github/github/repos_releases.go
index 10abc88..5c27565 100644
--- a/vendor/github.com/google/go-github/github/repos_releases.go
+++ b/vendor/github.com/google/go-github/github/repos_releases.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"errors"
"fmt"
"io"
@@ -41,7 +42,7 @@ func (r RepositoryRelease) String() string {
return Stringify(r)
}
-// ReleaseAsset represents a Github release asset in a repository.
+// ReleaseAsset represents a GitHub release asset in a repository.
type ReleaseAsset struct {
ID *int `json:"id,omitempty"`
URL *string `json:"url,omitempty"`
@@ -64,7 +65,7 @@ func (r ReleaseAsset) String() string {
// ListReleases lists the releases for a repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/releases/#list-releases-for-a-repository
-func (s *RepositoriesService) ListReleases(owner, repo string, opt *ListOptions) ([]*RepositoryRelease, *Response, error) {
+func (s *RepositoriesService) ListReleases(ctx context.Context, owner, repo string, opt *ListOptions) ([]*RepositoryRelease, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/releases", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -77,7 +78,7 @@ func (s *RepositoriesService) ListReleases(owner, repo string, opt *ListOptions)
}
var releases []*RepositoryRelease
- resp, err := s.client.Do(req, &releases)
+ resp, err := s.client.Do(ctx, req, &releases)
if err != nil {
return nil, resp, err
}
@@ -87,35 +88,35 @@ func (s *RepositoriesService) ListReleases(owner, repo string, opt *ListOptions)
// GetRelease fetches a single release.
//
// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-a-single-release
-func (s *RepositoriesService) GetRelease(owner, repo string, id int) (*RepositoryRelease, *Response, error) {
+func (s *RepositoriesService) GetRelease(ctx context.Context, owner, repo string, id int) (*RepositoryRelease, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id)
- return s.getSingleRelease(u)
+ return s.getSingleRelease(ctx, u)
}
// GetLatestRelease fetches the latest published release for the repository.
//
// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-the-latest-release
-func (s *RepositoriesService) GetLatestRelease(owner, repo string) (*RepositoryRelease, *Response, error) {
+func (s *RepositoriesService) GetLatestRelease(ctx context.Context, owner, repo string) (*RepositoryRelease, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/releases/latest", owner, repo)
- return s.getSingleRelease(u)
+ return s.getSingleRelease(ctx, u)
}
// GetReleaseByTag fetches a release with the specified tag.
//
// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-a-release-by-tag-name
-func (s *RepositoriesService) GetReleaseByTag(owner, repo, tag string) (*RepositoryRelease, *Response, error) {
+func (s *RepositoriesService) GetReleaseByTag(ctx context.Context, owner, repo, tag string) (*RepositoryRelease, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/releases/tags/%s", owner, repo, tag)
- return s.getSingleRelease(u)
+ return s.getSingleRelease(ctx, u)
}
-func (s *RepositoriesService) getSingleRelease(url string) (*RepositoryRelease, *Response, error) {
+func (s *RepositoriesService) getSingleRelease(ctx context.Context, url string) (*RepositoryRelease, *Response, error) {
req, err := s.client.NewRequest("GET", url, nil)
if err != nil {
return nil, nil, err
}
release := new(RepositoryRelease)
- resp, err := s.client.Do(req, release)
+ resp, err := s.client.Do(ctx, req, release)
if err != nil {
return nil, resp, err
}
@@ -124,8 +125,8 @@ func (s *RepositoriesService) getSingleRelease(url string) (*RepositoryRelease,
// CreateRelease adds a new release for a repository.
//
-// GitHub API docs : https://developer.github.com/v3/repos/releases/#create-a-release
-func (s *RepositoriesService) CreateRelease(owner, repo string, release *RepositoryRelease) (*RepositoryRelease, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/releases/#create-a-release
+func (s *RepositoriesService) CreateRelease(ctx context.Context, owner, repo string, release *RepositoryRelease) (*RepositoryRelease, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/releases", owner, repo)
req, err := s.client.NewRequest("POST", u, release)
@@ -134,7 +135,7 @@ func (s *RepositoriesService) CreateRelease(owner, repo string, release *Reposit
}
r := new(RepositoryRelease)
- resp, err := s.client.Do(req, r)
+ resp, err := s.client.Do(ctx, req, r)
if err != nil {
return nil, resp, err
}
@@ -143,8 +144,8 @@ func (s *RepositoriesService) CreateRelease(owner, repo string, release *Reposit
// EditRelease edits a repository release.
//
-// GitHub API docs : https://developer.github.com/v3/repos/releases/#edit-a-release
-func (s *RepositoriesService) EditRelease(owner, repo string, id int, release *RepositoryRelease) (*RepositoryRelease, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/releases/#edit-a-release
+func (s *RepositoriesService) EditRelease(ctx context.Context, owner, repo string, id int, release *RepositoryRelease) (*RepositoryRelease, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id)
req, err := s.client.NewRequest("PATCH", u, release)
@@ -153,7 +154,7 @@ func (s *RepositoriesService) EditRelease(owner, repo string, id int, release *R
}
r := new(RepositoryRelease)
- resp, err := s.client.Do(req, r)
+ resp, err := s.client.Do(ctx, req, r)
if err != nil {
return nil, resp, err
}
@@ -162,21 +163,21 @@ func (s *RepositoriesService) EditRelease(owner, repo string, id int, release *R
// DeleteRelease deletes a single release from a repository.
//
-// GitHub API docs : https://developer.github.com/v3/repos/releases/#delete-a-release
-func (s *RepositoriesService) DeleteRelease(owner, repo string, id int) (*Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/releases/#delete-a-release
+func (s *RepositoriesService) DeleteRelease(ctx context.Context, owner, repo string, id int) (*Response, error) {
u := fmt.Sprintf("repos/%s/%s/releases/%d", owner, repo, id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// ListReleaseAssets lists the release's assets.
//
-// GitHub API docs : https://developer.github.com/v3/repos/releases/#list-assets-for-a-release
-func (s *RepositoriesService) ListReleaseAssets(owner, repo string, id int, opt *ListOptions) ([]*ReleaseAsset, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/releases/#list-assets-for-a-release
+func (s *RepositoriesService) ListReleaseAssets(ctx context.Context, owner, repo string, id int, opt *ListOptions) ([]*ReleaseAsset, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id)
u, err := addOptions(u, opt)
if err != nil {
@@ -189,7 +190,7 @@ func (s *RepositoriesService) ListReleaseAssets(owner, repo string, id int, opt
}
var assets []*ReleaseAsset
- resp, err := s.client.Do(req, &assets)
+ resp, err := s.client.Do(ctx, req, &assets)
if err != nil {
return nil, resp, err
}
@@ -198,8 +199,8 @@ func (s *RepositoriesService) ListReleaseAssets(owner, repo string, id int, opt
// GetReleaseAsset fetches a single release asset.
//
-// GitHub API docs : https://developer.github.com/v3/repos/releases/#get-a-single-release-asset
-func (s *RepositoriesService) GetReleaseAsset(owner, repo string, id int) (*ReleaseAsset, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-a-single-release-asset
+func (s *RepositoriesService) GetReleaseAsset(ctx context.Context, owner, repo string, id int) (*ReleaseAsset, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id)
req, err := s.client.NewRequest("GET", u, nil)
@@ -208,7 +209,7 @@ func (s *RepositoriesService) GetReleaseAsset(owner, repo string, id int) (*Rele
}
asset := new(ReleaseAsset)
- resp, err := s.client.Do(req, asset)
+ resp, err := s.client.Do(ctx, req, asset)
if err != nil {
return nil, resp, err
}
@@ -222,8 +223,8 @@ func (s *RepositoriesService) GetReleaseAsset(owner, repo string, id int) (*Rele
// If a redirect is returned, the redirect URL will be returned as a string instead
// of the io.ReadCloser. Exactly one of rc and redirectURL will be zero.
//
-// GitHub API docs : https://developer.github.com/v3/repos/releases/#get-a-single-release-asset
-func (s *RepositoriesService) DownloadReleaseAsset(owner, repo string, id int) (rc io.ReadCloser, redirectURL string, err error) {
+// GitHub API docs: https://developer.github.com/v3/repos/releases/#get-a-single-release-asset
+func (s *RepositoriesService) DownloadReleaseAsset(ctx context.Context, owner, repo string, id int) (rc io.ReadCloser, redirectURL string, err error) {
u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id)
req, err := s.client.NewRequest("GET", u, nil)
@@ -243,6 +244,7 @@ func (s *RepositoriesService) DownloadReleaseAsset(owner, repo string, id int) (
}
defer func() { s.client.client.CheckRedirect = saveRedirect }()
+ ctx, req = withContext(ctx, req)
resp, err := s.client.client.Do(req)
if err != nil {
if !strings.Contains(err.Error(), "disable redirect") {
@@ -261,8 +263,8 @@ func (s *RepositoriesService) DownloadReleaseAsset(owner, repo string, id int) (
// EditReleaseAsset edits a repository release asset.
//
-// GitHub API docs : https://developer.github.com/v3/repos/releases/#edit-a-release-asset
-func (s *RepositoriesService) EditReleaseAsset(owner, repo string, id int, release *ReleaseAsset) (*ReleaseAsset, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/releases/#edit-a-release-asset
+func (s *RepositoriesService) EditReleaseAsset(ctx context.Context, owner, repo string, id int, release *ReleaseAsset) (*ReleaseAsset, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id)
req, err := s.client.NewRequest("PATCH", u, release)
@@ -271,7 +273,7 @@ func (s *RepositoriesService) EditReleaseAsset(owner, repo string, id int, relea
}
asset := new(ReleaseAsset)
- resp, err := s.client.Do(req, asset)
+ resp, err := s.client.Do(ctx, req, asset)
if err != nil {
return nil, resp, err
}
@@ -280,22 +282,22 @@ func (s *RepositoriesService) EditReleaseAsset(owner, repo string, id int, relea
// DeleteReleaseAsset deletes a single release asset from a repository.
//
-// GitHub API docs : https://developer.github.com/v3/repos/releases/#delete-a-release-asset
-func (s *RepositoriesService) DeleteReleaseAsset(owner, repo string, id int) (*Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/releases/#delete-a-release-asset
+func (s *RepositoriesService) DeleteReleaseAsset(ctx context.Context, owner, repo string, id int) (*Response, error) {
u := fmt.Sprintf("repos/%s/%s/releases/assets/%d", owner, repo, id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// UploadReleaseAsset creates an asset by uploading a file into a release repository.
// To upload assets that cannot be represented by an os.File, call NewUploadRequest directly.
//
-// GitHub API docs : https://developer.github.com/v3/repos/releases/#upload-a-release-asset
-func (s *RepositoriesService) UploadReleaseAsset(owner, repo string, id int, opt *UploadOptions, file *os.File) (*ReleaseAsset, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/releases/#upload-a-release-asset
+func (s *RepositoriesService) UploadReleaseAsset(ctx context.Context, owner, repo string, id int, opt *UploadOptions, file *os.File) (*ReleaseAsset, *Response, error) {
u := fmt.Sprintf("repos/%s/%s/releases/%d/assets", owner, repo, id)
u, err := addOptions(u, opt)
if err != nil {
@@ -317,7 +319,7 @@ func (s *RepositoriesService) UploadReleaseAsset(owner, repo string, id int, opt
}
asset := new(ReleaseAsset)
- resp, err := s.client.Do(req, asset)
+ resp, err := s.client.Do(ctx, req, asset)
if err != nil {
return nil, resp, err
}
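
DownloadReleaseAsset keeps its dual return: per the comment in the hunk above, exactly one of rc and redirectURL is set, and the request now also carries the caller's context via withContext. A sketch that handles both branches; following the redirect with net/http is this sketch's choice, not something the library prescribes.

package example

import (
	"context"
	"io"
	"net/http"
	"os"

	"github.com/google/go-github/github"
)

// saveReleaseAsset downloads a release asset to a local file, either from the
// returned body or by fetching the redirect URL itself.
func saveReleaseAsset(ctx context.Context, client *github.Client, owner, repo string, id int, dest string) error {
	rc, redirectURL, err := client.Repositories.DownloadReleaseAsset(ctx, owner, repo, id)
	if err != nil {
		return err
	}
	if rc == nil {
		// The API answered with a redirect; fetch the URL ourselves, still
		// honouring the caller's context.
		req, err := http.NewRequest("GET", redirectURL, nil)
		if err != nil {
			return err
		}
		resp, err := http.DefaultClient.Do(req.WithContext(ctx))
		if err != nil {
			return err
		}
		rc = resp.Body
	}
	defer rc.Close()

	f, err := os.Create(dest)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, rc)
	return err
}
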
diff --git a/vendor/github.com/google/go-github/github/repos_stats.go b/vendor/github.com/google/go-github/github/repos_stats.go
index 5ed1dec..30fc7bd 100644
--- a/vendor/github.com/google/go-github/github/repos_stats.go
+++ b/vendor/github.com/google/go-github/github/repos_stats.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -44,8 +45,8 @@ func (w WeeklyStats) String() string {
// it is now computing the requested statistics. A follow up request, after a
// delay of a second or so, should result in a successful request.
//
-// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#contributors
-func (s *RepositoriesService) ListContributorsStats(owner, repo string) ([]*ContributorStats, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/statistics/#contributors
+func (s *RepositoriesService) ListContributorsStats(ctx context.Context, owner, repo string) ([]*ContributorStats, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/stats/contributors", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -53,7 +54,7 @@ func (s *RepositoriesService) ListContributorsStats(owner, repo string) ([]*Cont
}
var contributorStats []*ContributorStats
- resp, err := s.client.Do(req, &contributorStats)
+ resp, err := s.client.Do(ctx, req, &contributorStats)
if err != nil {
return nil, resp, err
}
@@ -83,8 +84,8 @@ func (w WeeklyCommitActivity) String() string {
// it is now computing the requested statistics. A follow up request, after a
// delay of a second or so, should result in a successful request.
//
-// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#commit-activity
-func (s *RepositoriesService) ListCommitActivity(owner, repo string) ([]*WeeklyCommitActivity, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/statistics/#commit-activity
+func (s *RepositoriesService) ListCommitActivity(ctx context.Context, owner, repo string) ([]*WeeklyCommitActivity, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/stats/commit_activity", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -92,7 +93,7 @@ func (s *RepositoriesService) ListCommitActivity(owner, repo string) ([]*WeeklyC
}
var weeklyCommitActivity []*WeeklyCommitActivity
- resp, err := s.client.Do(req, &weeklyCommitActivity)
+ resp, err := s.client.Do(ctx, req, &weeklyCommitActivity)
if err != nil {
return nil, resp, err
}
@@ -110,8 +111,8 @@ func (s *RepositoriesService) ListCommitActivity(owner, repo string) ([]*WeeklyC
// it is now computing the requested statistics. A follow up request, after a
// delay of a second or so, should result in a successful request.
//
-// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#code-frequency
-func (s *RepositoriesService) ListCodeFrequency(owner, repo string) ([]*WeeklyStats, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/statistics/#code-frequency
+func (s *RepositoriesService) ListCodeFrequency(ctx context.Context, owner, repo string) ([]*WeeklyStats, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/stats/code_frequency", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -119,7 +120,7 @@ func (s *RepositoriesService) ListCodeFrequency(owner, repo string) ([]*WeeklySt
}
var weeks [][]int
- resp, err := s.client.Do(req, &weeks)
+ resp, err := s.client.Do(ctx, req, &weeks)
// convert int slices into WeeklyStats
var stats []*WeeklyStats
@@ -163,8 +164,8 @@ func (r RepositoryParticipation) String() string {
// it is now computing the requested statistics. A follow up request, after a
// delay of a second or so, should result in a successful request.
//
-// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#participation
-func (s *RepositoriesService) ListParticipation(owner, repo string) (*RepositoryParticipation, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/statistics/#participation
+func (s *RepositoriesService) ListParticipation(ctx context.Context, owner, repo string) (*RepositoryParticipation, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/stats/participation", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -172,7 +173,7 @@ func (s *RepositoriesService) ListParticipation(owner, repo string) (*Repository
}
participation := new(RepositoryParticipation)
- resp, err := s.client.Do(req, participation)
+ resp, err := s.client.Do(ctx, req, participation)
if err != nil {
return nil, resp, err
}
@@ -196,8 +197,8 @@ type PunchCard struct {
// it is now computing the requested statistics. A follow up request, after a
// delay of a second or so, should result in a successful request.
//
-// GitHub API Docs: https://developer.github.com/v3/repos/statistics/#punch-card
-func (s *RepositoriesService) ListPunchCard(owner, repo string) ([]*PunchCard, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/repos/statistics/#punch-card
+func (s *RepositoriesService) ListPunchCard(ctx context.Context, owner, repo string) ([]*PunchCard, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/stats/punch_card", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -205,7 +206,7 @@ func (s *RepositoriesService) ListPunchCard(owner, repo string) ([]*PunchCard, *
}
var results [][]int
- resp, err := s.client.Do(req, &results)
+ resp, err := s.client.Do(ctx, req, &results)
// convert int slices into Punchcards
var cards []*PunchCard
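
Each statistics endpoint above notes that GitHub may answer 202 Accepted while it computes the data, and that a follow-up request after a short delay should succeed. A sketch of that retry loop; it keys off resp.StatusCode (Response embeds *http.Response elsewhere in the package) and makes no assumption about how the 202 body is decoded.

package example

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"github.com/google/go-github/github"
)

// contributorStats retries ListContributorsStats a few times while GitHub is
// still generating the statistics, honouring context cancellation in between.
func contributorStats(ctx context.Context, client *github.Client, owner, repo string) ([]*github.ContributorStats, error) {
	for attempt := 0; attempt < 3; attempt++ {
		stats, resp, err := client.Repositories.ListContributorsStats(ctx, owner, repo)
		if resp != nil && resp.StatusCode == http.StatusAccepted {
			// Statistics are still being computed; wait briefly and retry.
			select {
			case <-time.After(2 * time.Second):
				continue
			case <-ctx.Done():
				return nil, ctx.Err()
			}
		}
		return stats, err
	}
	return nil, fmt.Errorf("statistics for %s/%s not ready after retries", owner, repo)
}
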
diff --git a/vendor/github.com/google/go-github/github/repos_statuses.go b/vendor/github.com/google/go-github/github/repos_statuses.go
index 65c2e0a..6db5010 100644
--- a/vendor/github.com/google/go-github/github/repos_statuses.go
+++ b/vendor/github.com/google/go-github/github/repos_statuses.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -42,7 +43,7 @@ func (r RepoStatus) String() string {
// reference. ref can be a SHA, a branch name, or a tag name.
//
// GitHub API docs: https://developer.github.com/v3/repos/statuses/#list-statuses-for-a-specific-ref
-func (s *RepositoriesService) ListStatuses(owner, repo, ref string, opt *ListOptions) ([]*RepoStatus, *Response, error) {
+func (s *RepositoriesService) ListStatuses(ctx context.Context, owner, repo, ref string, opt *ListOptions) ([]*RepoStatus, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/commits/%v/statuses", owner, repo, ref)
u, err := addOptions(u, opt)
if err != nil {
@@ -55,7 +56,7 @@ func (s *RepositoriesService) ListStatuses(owner, repo, ref string, opt *ListOpt
}
var statuses []*RepoStatus
- resp, err := s.client.Do(req, &statuses)
+ resp, err := s.client.Do(ctx, req, &statuses)
if err != nil {
return nil, resp, err
}
@@ -67,7 +68,7 @@ func (s *RepositoriesService) ListStatuses(owner, repo, ref string, opt *ListOpt
// reference. Ref can be a SHA, a branch name, or a tag name.
//
// GitHub API docs: https://developer.github.com/v3/repos/statuses/#create-a-status
-func (s *RepositoriesService) CreateStatus(owner, repo, ref string, status *RepoStatus) (*RepoStatus, *Response, error) {
+func (s *RepositoriesService) CreateStatus(ctx context.Context, owner, repo, ref string, status *RepoStatus) (*RepoStatus, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/statuses/%v", owner, repo, ref)
req, err := s.client.NewRequest("POST", u, status)
if err != nil {
@@ -75,7 +76,7 @@ func (s *RepositoriesService) CreateStatus(owner, repo, ref string, status *Repo
}
repoStatus := new(RepoStatus)
- resp, err := s.client.Do(req, repoStatus)
+ resp, err := s.client.Do(ctx, req, repoStatus)
if err != nil {
return nil, resp, err
}
@@ -106,7 +107,7 @@ func (s CombinedStatus) String() string {
// reference. ref can be a SHA, a branch name, or a tag name.
//
// GitHub API docs: https://developer.github.com/v3/repos/statuses/#get-the-combined-status-for-a-specific-ref
-func (s *RepositoriesService) GetCombinedStatus(owner, repo, ref string, opt *ListOptions) (*CombinedStatus, *Response, error) {
+func (s *RepositoriesService) GetCombinedStatus(ctx context.Context, owner, repo, ref string, opt *ListOptions) (*CombinedStatus, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/commits/%v/status", owner, repo, ref)
u, err := addOptions(u, opt)
if err != nil {
@@ -119,7 +120,7 @@ func (s *RepositoriesService) GetCombinedStatus(owner, repo, ref string, opt *Li
}
status := new(CombinedStatus)
- resp, err := s.client.Do(req, status)
+ resp, err := s.client.Do(ctx, req, status)
if err != nil {
return nil, resp, err
}
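
A sketch of posting a commit status with the new signature; the State, Context and Description fields on RepoStatus and the github.String helper are assumed from the rest of the package, only CreateStatus's signature is shown above.

package example

import (
	"context"

	"github.com/google/go-github/github"
)

// markPending posts a "pending" status for a commit SHA (or a branch or tag
// name, per the comment above) under a custom status context.
func markPending(ctx context.Context, client *github.Client, owner, repo, ref string) error {
	status := &github.RepoStatus{
		State:       github.String("pending"),
		Context:     github.String("ci/example"),
		Description: github.String("build queued"),
	}
	_, _, err := client.Repositories.CreateStatus(ctx, owner, repo, ref, status)
	return err
}
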
diff --git a/vendor/github.com/google/go-github/github/repos_traffic.go b/vendor/github.com/google/go-github/github/repos_traffic.go
index 67341b9..fb1c976 100644
--- a/vendor/github.com/google/go-github/github/repos_traffic.go
+++ b/vendor/github.com/google/go-github/github/repos_traffic.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// TrafficReferrer represents information about traffic from a referrer.
type TrafficReferrer struct {
@@ -52,7 +55,7 @@ type TrafficBreakdownOptions struct {
// ListTrafficReferrers lists the top 10 referrers over the last 14 days.
//
// GitHub API docs: https://developer.github.com/v3/repos/traffic/#list-referrers
-func (s *RepositoriesService) ListTrafficReferrers(owner, repo string) ([]*TrafficReferrer, *Response, error) {
+func (s *RepositoriesService) ListTrafficReferrers(ctx context.Context, owner, repo string) ([]*TrafficReferrer, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/traffic/popular/referrers", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
@@ -61,7 +64,7 @@ func (s *RepositoriesService) ListTrafficReferrers(owner, repo string) ([]*Traff
}
var trafficReferrers []*TrafficReferrer
- resp, err := s.client.Do(req, &trafficReferrers)
+ resp, err := s.client.Do(ctx, req, &trafficReferrers)
if err != nil {
return nil, resp, err
}
@@ -72,7 +75,7 @@ func (s *RepositoriesService) ListTrafficReferrers(owner, repo string) ([]*Traff
// ListTrafficPaths lists the top 10 popular content paths over the last 14 days.
//
// GitHub API docs: https://developer.github.com/v3/repos/traffic/#list-paths
-func (s *RepositoriesService) ListTrafficPaths(owner, repo string) ([]*TrafficPath, *Response, error) {
+func (s *RepositoriesService) ListTrafficPaths(ctx context.Context, owner, repo string) ([]*TrafficPath, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/traffic/popular/paths", owner, repo)
req, err := s.client.NewRequest("GET", u, nil)
@@ -81,7 +84,7 @@ func (s *RepositoriesService) ListTrafficPaths(owner, repo string) ([]*TrafficPa
}
var paths []*TrafficPath
- resp, err := s.client.Do(req, &paths)
+ resp, err := s.client.Do(ctx, req, &paths)
if err != nil {
return nil, resp, err
}
@@ -92,7 +95,7 @@ func (s *RepositoriesService) ListTrafficPaths(owner, repo string) ([]*TrafficPa
// ListTrafficViews gets the total number of views for the last 14 days and breaks it down either per day or per week.
//
// GitHub API docs: https://developer.github.com/v3/repos/traffic/#views
-func (s *RepositoriesService) ListTrafficViews(owner, repo string, opt *TrafficBreakdownOptions) (*TrafficViews, *Response, error) {
+func (s *RepositoriesService) ListTrafficViews(ctx context.Context, owner, repo string, opt *TrafficBreakdownOptions) (*TrafficViews, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/traffic/views", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -105,7 +108,7 @@ func (s *RepositoriesService) ListTrafficViews(owner, repo string, opt *TrafficB
}
trafficViews := new(TrafficViews)
- resp, err := s.client.Do(req, &trafficViews)
+ resp, err := s.client.Do(ctx, req, &trafficViews)
if err != nil {
return nil, resp, err
}
@@ -116,7 +119,7 @@ func (s *RepositoriesService) ListTrafficViews(owner, repo string, opt *TrafficB
// ListTrafficClones gets the total number of clones for the last 14 days and breaks it down either per day or per week.
//
// GitHub API docs: https://developer.github.com/v3/repos/traffic/#views
-func (s *RepositoriesService) ListTrafficClones(owner, repo string, opt *TrafficBreakdownOptions) (*TrafficClones, *Response, error) {
+func (s *RepositoriesService) ListTrafficClones(ctx context.Context, owner, repo string, opt *TrafficBreakdownOptions) (*TrafficClones, *Response, error) {
u := fmt.Sprintf("repos/%v/%v/traffic/clones", owner, repo)
u, err := addOptions(u, opt)
if err != nil {
@@ -129,7 +132,7 @@ func (s *RepositoriesService) ListTrafficClones(owner, repo string, opt *Traffic
}
trafficClones := new(TrafficClones)
- resp, err := s.client.Do(req, &trafficClones)
+ resp, err := s.client.Do(ctx, req, &trafficClones)
if err != nil {
return nil, resp, err
}
diff --git a/vendor/github.com/google/go-github/github/search.go b/vendor/github.com/google/go-github/github/search.go
index e48f3ea..7668b8b 100644
--- a/vendor/github.com/google/go-github/github/search.go
+++ b/vendor/github.com/google/go-github/github/search.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
qs "github.com/google/go-querystring/query"
@@ -49,9 +50,9 @@ type RepositoriesSearchResult struct {
// Repositories searches repositories via various criteria.
//
// GitHub API docs: https://developer.github.com/v3/search/#search-repositories
-func (s *SearchService) Repositories(query string, opt *SearchOptions) (*RepositoriesSearchResult, *Response, error) {
+func (s *SearchService) Repositories(ctx context.Context, query string, opt *SearchOptions) (*RepositoriesSearchResult, *Response, error) {
result := new(RepositoriesSearchResult)
- resp, err := s.search("repositories", query, opt, result)
+ resp, err := s.search(ctx, "repositories", query, opt, result)
return result, resp, err
}
@@ -64,25 +65,25 @@ type CommitsSearchResult struct {
// CommitResult represents a commit object as returned in commit search endpoint response.
type CommitResult struct {
- Hash *string `json:"hash,omitempty"`
- Message *string `json:"message,omitempty"`
- AuthorID *int `json:"author_id,omitempty"`
- AuthorName *string `json:"author_name,omitempty"`
- AuthorEmail *string `json:"author_email,omitempty"`
- AuthorDate *Timestamp `json:"author_date,omitempty"`
- CommitterID *int `json:"committer_id,omitempty"`
- CommitterName *string `json:"committer_name,omitempty"`
- CommitterEmail *string `json:"committer_email,omitempty"`
- CommitterDate *Timestamp `json:"committer_date,omitempty"`
- Repository *Repository `json:"repository,omitempty"`
+ SHA *string `json:"sha,omitempty"`
+ Commit *Commit `json:"commit,omitempty"`
+ Author *User `json:"author,omitempty"`
+ Committer *User `json:"committer,omitempty"`
+ Parents []*Commit `json:"parents,omitempty"`
+ HTMLURL *string `json:"html_url,omitempty"`
+ URL *string `json:"url,omitempty"`
+ CommentsURL *string `json:"comments_url,omitempty"`
+
+ Repository *Repository `json:"repository,omitempty"`
+ Score *float64 `json:"score,omitempty"`
}
// Commits searches commits via various criteria.
//
-// GitHub API Docs: https://developer.github.com/v3/search/#search-commits
-func (s *SearchService) Commits(query string, opt *SearchOptions) (*CommitsSearchResult, *Response, error) {
+// GitHub API docs: https://developer.github.com/v3/search/#search-commits
+func (s *SearchService) Commits(ctx context.Context, query string, opt *SearchOptions) (*CommitsSearchResult, *Response, error) {
result := new(CommitsSearchResult)
- resp, err := s.search("commits", query, opt, result)
+ resp, err := s.search(ctx, "commits", query, opt, result)
return result, resp, err
}
@@ -96,9 +97,9 @@ type IssuesSearchResult struct {
// Issues searches issues via various criteria.
//
// GitHub API docs: https://developer.github.com/v3/search/#search-issues
-func (s *SearchService) Issues(query string, opt *SearchOptions) (*IssuesSearchResult, *Response, error) {
+func (s *SearchService) Issues(ctx context.Context, query string, opt *SearchOptions) (*IssuesSearchResult, *Response, error) {
result := new(IssuesSearchResult)
- resp, err := s.search("issues", query, opt, result)
+ resp, err := s.search(ctx, "issues", query, opt, result)
return result, resp, err
}
@@ -112,9 +113,9 @@ type UsersSearchResult struct {
// Users searches users via various criteria.
//
// GitHub API docs: https://developer.github.com/v3/search/#search-users
-func (s *SearchService) Users(query string, opt *SearchOptions) (*UsersSearchResult, *Response, error) {
+func (s *SearchService) Users(ctx context.Context, query string, opt *SearchOptions) (*UsersSearchResult, *Response, error) {
result := new(UsersSearchResult)
- resp, err := s.search("users", query, opt, result)
+ resp, err := s.search(ctx, "users", query, opt, result)
return result, resp, err
}
@@ -161,20 +162,20 @@ func (c CodeResult) String() string {
// Code searches code via various criteria.
//
// GitHub API docs: https://developer.github.com/v3/search/#search-code
-func (s *SearchService) Code(query string, opt *SearchOptions) (*CodeSearchResult, *Response, error) {
+func (s *SearchService) Code(ctx context.Context, query string, opt *SearchOptions) (*CodeSearchResult, *Response, error) {
result := new(CodeSearchResult)
- resp, err := s.search("code", query, opt, result)
+ resp, err := s.search(ctx, "code", query, opt, result)
return result, resp, err
}
// Helper function that executes search queries against different
// GitHub search types (repositories, commits, code, issues, users)
-func (s *SearchService) search(searchType string, query string, opt *SearchOptions, result interface{}) (*Response, error) {
+func (s *SearchService) search(ctx context.Context, searchType string, query string, opt *SearchOptions, result interface{}) (*Response, error) {
params, err := qs.Values(opt)
if err != nil {
return nil, err
}
- params.Add("q", query)
+ params.Set("q", query)
u := fmt.Sprintf("search/%s?%s", searchType, params.Encode())
req, err := s.client.NewRequest("GET", u, nil)
@@ -193,5 +194,5 @@ func (s *SearchService) search(searchType string, query string, opt *SearchOptio
req.Header.Set("Accept", "application/vnd.github.v3.text-match+json")
}
- return s.client.Do(req, result)
+ return s.client.Do(ctx, req, result)
}
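
Alongside the reworked CommitResult shape above (SHA, Commit, Author, Committer, Parents, URLs, Repository and Score replace the old flattened hash/author fields), every Search method gains a context argument and the shared helper now Sets the q parameter rather than Adding another copy. A caller sketch; the Sort and Order fields of SearchOptions are assumed from the rest of the package.

package example

import (
	"context"

	"github.com/google/go-github/github"
)

// searchCommits runs a commit search under the new context-aware signature and
// returns the raw result for the caller to inspect.
func searchCommits(ctx context.Context, client *github.Client, query string) (*github.CommitsSearchResult, error) {
	opt := &github.SearchOptions{Sort: "committer-date", Order: "desc"}
	result, _, err := client.Search.Commits(ctx, query, opt)
	return result, err
}
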
diff --git a/vendor/github.com/google/go-github/github/users.go b/vendor/github.com/google/go-github/github/users.go
index 7639cb2..d74439c 100644
--- a/vendor/github.com/google/go-github/github/users.go
+++ b/vendor/github.com/google/go-github/github/users.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// UsersService handles communication with the user related
// methods of the GitHub API.
@@ -72,7 +75,7 @@ func (u User) String() string {
// user.
//
// GitHub API docs: https://developer.github.com/v3/users/#get-a-single-user
-func (s *UsersService) Get(user string) (*User, *Response, error) {
+func (s *UsersService) Get(ctx context.Context, user string) (*User, *Response, error) {
var u string
if user != "" {
u = fmt.Sprintf("users/%v", user)
@@ -85,7 +88,7 @@ func (s *UsersService) Get(user string) (*User, *Response, error) {
}
uResp := new(User)
- resp, err := s.client.Do(req, uResp)
+ resp, err := s.client.Do(ctx, req, uResp)
if err != nil {
return nil, resp, err
}
@@ -96,7 +99,7 @@ func (s *UsersService) Get(user string) (*User, *Response, error) {
// GetByID fetches a user.
//
// Note: GetByID uses the undocumented GitHub API endpoint /user/:id.
-func (s *UsersService) GetByID(id int) (*User, *Response, error) {
+func (s *UsersService) GetByID(ctx context.Context, id int) (*User, *Response, error) {
u := fmt.Sprintf("user/%d", id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -104,7 +107,7 @@ func (s *UsersService) GetByID(id int) (*User, *Response, error) {
}
user := new(User)
- resp, err := s.client.Do(req, user)
+ resp, err := s.client.Do(ctx, req, user)
if err != nil {
return nil, resp, err
}
@@ -115,7 +118,7 @@ func (s *UsersService) GetByID(id int) (*User, *Response, error) {
// Edit the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/users/#update-the-authenticated-user
-func (s *UsersService) Edit(user *User) (*User, *Response, error) {
+func (s *UsersService) Edit(ctx context.Context, user *User) (*User, *Response, error) {
u := "user"
req, err := s.client.NewRequest("PATCH", u, user)
if err != nil {
@@ -123,7 +126,7 @@ func (s *UsersService) Edit(user *User) (*User, *Response, error) {
}
uResp := new(User)
- resp, err := s.client.Do(req, uResp)
+ resp, err := s.client.Do(ctx, req, uResp)
if err != nil {
return nil, resp, err
}
@@ -145,7 +148,7 @@ type UserListOptions struct {
// To paginate through all users, populate 'Since' with the ID of the last user.
//
// GitHub API docs: https://developer.github.com/v3/users/#get-all-users
-func (s *UsersService) ListAll(opt *UserListOptions) ([]*User, *Response, error) {
+func (s *UsersService) ListAll(ctx context.Context, opt *UserListOptions) ([]*User, *Response, error) {
u, err := addOptions("users", opt)
if err != nil {
return nil, nil, err
@@ -157,7 +160,7 @@ func (s *UsersService) ListAll(opt *UserListOptions) ([]*User, *Response, error)
}
var users []*User
- resp, err := s.client.Do(req, &users)
+ resp, err := s.client.Do(ctx, req, &users)
if err != nil {
return nil, resp, err
}
@@ -169,7 +172,7 @@ func (s *UsersService) ListAll(opt *UserListOptions) ([]*User, *Response, error)
// authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/repos/invitations/#list-a-users-repository-invitations
-func (s *UsersService) ListInvitations() ([]*RepositoryInvitation, *Response, error) {
+func (s *UsersService) ListInvitations(ctx context.Context) ([]*RepositoryInvitation, *Response, error) {
req, err := s.client.NewRequest("GET", "user/repository_invitations", nil)
if err != nil {
return nil, nil, err
@@ -179,7 +182,7 @@ func (s *UsersService) ListInvitations() ([]*RepositoryInvitation, *Response, er
req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
invites := []*RepositoryInvitation{}
- resp, err := s.client.Do(req, &invites)
+ resp, err := s.client.Do(ctx, req, &invites)
if err != nil {
return nil, resp, err
}
@@ -191,7 +194,7 @@ func (s *UsersService) ListInvitations() ([]*RepositoryInvitation, *Response, er
// authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/repos/invitations/#accept-a-repository-invitation
-func (s *UsersService) AcceptInvitation(invitationID int) (*Response, error) {
+func (s *UsersService) AcceptInvitation(ctx context.Context, invitationID int) (*Response, error) {
u := fmt.Sprintf("user/repository_invitations/%v", invitationID)
req, err := s.client.NewRequest("PATCH", u, nil)
if err != nil {
@@ -201,14 +204,14 @@ func (s *UsersService) AcceptInvitation(invitationID int) (*Response, error) {
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// DeclineInvitation declines the currently-open repository invitation for the
// authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/repos/invitations/#decline-a-repository-invitation
-func (s *UsersService) DeclineInvitation(invitationID int) (*Response, error) {
+func (s *UsersService) DeclineInvitation(ctx context.Context, invitationID int) (*Response, error) {
u := fmt.Sprintf("user/repository_invitations/%v", invitationID)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
@@ -218,5 +221,5 @@ func (s *UsersService) DeclineInvitation(invitationID int) (*Response, error) {
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeRepositoryInvitationsPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
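
ListAll's comment above says to paginate by feeding the ID of the last user back in as Since. A sketch of that loop; the Since field on UserListOptions and the ID field on User are assumptions, since neither struct body is visible here.

package example

import (
	"context"

	"github.com/google/go-github/github"
)

// firstUsers walks the global user list page by page until it has collected
// `limit` users, advancing the cursor with the last ID seen on each page.
func firstUsers(ctx context.Context, client *github.Client, limit int) ([]*github.User, error) {
	var all []*github.User
	opt := &github.UserListOptions{}
	for len(all) < limit {
		users, _, err := client.Users.ListAll(ctx, opt)
		if err != nil {
			return nil, err
		}
		if len(users) == 0 {
			break
		}
		all = append(all, users...)
		// Advance the Since cursor to the last ID seen (fields assumed).
		last := users[len(users)-1]
		if last.ID == nil {
			break
		}
		opt.Since = *last.ID
	}
	if len(all) > limit {
		all = all[:limit]
	}
	return all, nil
}
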
diff --git a/vendor/github.com/google/go-github/github/users_administration.go b/vendor/github.com/google/go-github/github/users_administration.go
index dc1dcb8..e042398 100644
--- a/vendor/github.com/google/go-github/github/users_administration.go
+++ b/vendor/github.com/google/go-github/github/users_administration.go
@@ -5,12 +5,15 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// PromoteSiteAdmin promotes a user to a site administrator of a GitHub Enterprise instance.
//
// GitHub API docs: https://developer.github.com/v3/users/administration/#promote-an-ordinary-user-to-a-site-administrator
-func (s *UsersService) PromoteSiteAdmin(user string) (*Response, error) {
+func (s *UsersService) PromoteSiteAdmin(ctx context.Context, user string) (*Response, error) {
u := fmt.Sprintf("users/%v/site_admin", user)
req, err := s.client.NewRequest("PUT", u, nil)
@@ -18,13 +21,13 @@ func (s *UsersService) PromoteSiteAdmin(user string) (*Response, error) {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// DemoteSiteAdmin demotes a user from site administrator of a GitHub Enterprise instance.
//
// GitHub API docs: https://developer.github.com/v3/users/administration/#demote-a-site-administrator-to-an-ordinary-user
-func (s *UsersService) DemoteSiteAdmin(user string) (*Response, error) {
+func (s *UsersService) DemoteSiteAdmin(ctx context.Context, user string) (*Response, error) {
u := fmt.Sprintf("users/%v/site_admin", user)
req, err := s.client.NewRequest("DELETE", u, nil)
@@ -32,13 +35,13 @@ func (s *UsersService) DemoteSiteAdmin(user string) (*Response, error) {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// Suspend a user on a GitHub Enterprise instance.
//
// GitHub API docs: https://developer.github.com/v3/users/administration/#suspend-a-user
-func (s *UsersService) Suspend(user string) (*Response, error) {
+func (s *UsersService) Suspend(ctx context.Context, user string) (*Response, error) {
u := fmt.Sprintf("users/%v/suspended", user)
req, err := s.client.NewRequest("PUT", u, nil)
@@ -46,13 +49,13 @@ func (s *UsersService) Suspend(user string) (*Response, error) {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// Unsuspend a user on a GitHub Enterprise instance.
//
// GitHub API docs: https://developer.github.com/v3/users/administration/#unsuspend-a-user
-func (s *UsersService) Unsuspend(user string) (*Response, error) {
+func (s *UsersService) Unsuspend(ctx context.Context, user string) (*Response, error) {
u := fmt.Sprintf("users/%v/suspended", user)
req, err := s.client.NewRequest("DELETE", u, nil)
@@ -60,5 +63,5 @@ func (s *UsersService) Unsuspend(user string) (*Response, error) {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/users_blocking.go b/vendor/github.com/google/go-github/github/users_blocking.go
new file mode 100644
index 0000000..39e4560
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/users_blocking.go
@@ -0,0 +1,91 @@
+// Copyright 2017 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package github
+
+import (
+ "context"
+ "fmt"
+)
+
+// ListBlockedUsers lists all the users blocked by the authenticated user.
+//
+// GitHub API docs: https://developer.github.com/v3/users/blocking/#list-blocked-users
+func (s *UsersService) ListBlockedUsers(ctx context.Context, opt *ListOptions) ([]*User, *Response, error) {
+ u := "user/blocks"
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeBlockUsersPreview)
+
+ var blockedUsers []*User
+ resp, err := s.client.Do(ctx, req, &blockedUsers)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return blockedUsers, resp, nil
+}
+
+// IsBlocked reports whether the specified user is blocked by the authenticated user.
+//
+// GitHub API docs: https://developer.github.com/v3/users/blocking/#check-whether-youve-blocked-a-user
+func (s *UsersService) IsBlocked(ctx context.Context, user string) (bool, *Response, error) {
+ u := fmt.Sprintf("user/blocks/%v", user)
+
+ req, err := s.client.NewRequest("GET", u, nil)
+ if err != nil {
+ return false, nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeBlockUsersPreview)
+
+ resp, err := s.client.Do(ctx, req, nil)
+ isBlocked, err := parseBoolResponse(err)
+ return isBlocked, resp, err
+}
+
+// BlockUser blocks the specified user for the authenticated user.
+//
+// GitHub API docs: https://developer.github.com/v3/users/blocking/#block-a-user
+func (s *UsersService) BlockUser(ctx context.Context, user string) (*Response, error) {
+ u := fmt.Sprintf("user/blocks/%v", user)
+
+ req, err := s.client.NewRequest("PUT", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeBlockUsersPreview)
+
+ return s.client.Do(ctx, req, nil)
+}
+
+// UnblockUser unblocks the specified user for the authenticated user.
+//
+// GitHub API docs: https://developer.github.com/v3/users/blocking/#unblock-a-user
+func (s *UsersService) UnblockUser(ctx context.Context, user string) (*Response, error) {
+ u := fmt.Sprintf("user/blocks/%v", user)
+
+ req, err := s.client.NewRequest("DELETE", u, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: remove custom Accept header when this API fully launches.
+ req.Header.Set("Accept", mediaTypeBlockUsersPreview)
+
+ return s.client.Do(ctx, req, nil)
+}
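users_blocking.go is entirely new, wrapping the (still preview) user blocking endpoints. A hedged sketch of how it might be driven, reusing an authenticated *github.Client and context like the one above:

```go
package blockingexample

import (
	"context"
	"fmt"

	"github.com/google/go-github/github"
)

// listAndUnblock pages through the authenticated user's block list and then
// unblocks one placeholder account.
func listAndUnblock(ctx context.Context, client *github.Client) error {
	opt := &github.ListOptions{PerPage: 50}
	for {
		blocked, resp, err := client.Users.ListBlockedUsers(ctx, opt)
		if err != nil {
			return err
		}
		for _, u := range blocked {
			if u.Login != nil {
				fmt.Println(*u.Login)
			}
		}
		if resp.NextPage == 0 {
			break
		}
		opt.Page = resp.NextPage
	}
	_, err := client.Users.UnblockUser(ctx, "someuser")
	return err
}
```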
diff --git a/vendor/github.com/google/go-github/github/users_emails.go b/vendor/github.com/google/go-github/github/users_emails.go
index f236b42..0bbd462 100644
--- a/vendor/github.com/google/go-github/github/users_emails.go
+++ b/vendor/github.com/google/go-github/github/users_emails.go
@@ -5,6 +5,8 @@
package github
+import "context"
+
// UserEmail represents user's email address
type UserEmail struct {
Email *string `json:"email,omitempty"`
@@ -15,7 +17,7 @@ type UserEmail struct {
// ListEmails lists all email addresses for the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/users/emails/#list-email-addresses-for-a-user
-func (s *UsersService) ListEmails(opt *ListOptions) ([]*UserEmail, *Response, error) {
+func (s *UsersService) ListEmails(ctx context.Context, opt *ListOptions) ([]*UserEmail, *Response, error) {
u := "user/emails"
u, err := addOptions(u, opt)
if err != nil {
@@ -28,7 +30,7 @@ func (s *UsersService) ListEmails(opt *ListOptions) ([]*UserEmail, *Response, er
}
var emails []*UserEmail
- resp, err := s.client.Do(req, &emails)
+ resp, err := s.client.Do(ctx, req, &emails)
if err != nil {
return nil, resp, err
}
@@ -39,7 +41,7 @@ func (s *UsersService) ListEmails(opt *ListOptions) ([]*UserEmail, *Response, er
// AddEmails adds email addresses of the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/users/emails/#add-email-addresses
-func (s *UsersService) AddEmails(emails []string) ([]*UserEmail, *Response, error) {
+func (s *UsersService) AddEmails(ctx context.Context, emails []string) ([]*UserEmail, *Response, error) {
u := "user/emails"
req, err := s.client.NewRequest("POST", u, emails)
if err != nil {
@@ -47,7 +49,7 @@ func (s *UsersService) AddEmails(emails []string) ([]*UserEmail, *Response, erro
}
var e []*UserEmail
- resp, err := s.client.Do(req, &e)
+ resp, err := s.client.Do(ctx, req, &e)
if err != nil {
return nil, resp, err
}
@@ -58,12 +60,12 @@ func (s *UsersService) AddEmails(emails []string) ([]*UserEmail, *Response, erro
// DeleteEmails deletes email addresses from authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/users/emails/#delete-email-addresses
-func (s *UsersService) DeleteEmails(emails []string) (*Response, error) {
+func (s *UsersService) DeleteEmails(ctx context.Context, emails []string) (*Response, error) {
u := "user/emails"
req, err := s.client.NewRequest("DELETE", u, emails)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/users_followers.go b/vendor/github.com/google/go-github/github/users_followers.go
index 9e81b60..c222409 100644
--- a/vendor/github.com/google/go-github/github/users_followers.go
+++ b/vendor/github.com/google/go-github/github/users_followers.go
@@ -5,13 +5,16 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// ListFollowers lists the followers for a user. Passing the empty string will
// fetch followers for the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/users/followers/#list-followers-of-a-user
-func (s *UsersService) ListFollowers(user string, opt *ListOptions) ([]*User, *Response, error) {
+func (s *UsersService) ListFollowers(ctx context.Context, user string, opt *ListOptions) ([]*User, *Response, error) {
var u string
if user != "" {
u = fmt.Sprintf("users/%v/followers", user)
@@ -29,7 +32,7 @@ func (s *UsersService) ListFollowers(user string, opt *ListOptions) ([]*User, *R
}
var users []*User
- resp, err := s.client.Do(req, &users)
+ resp, err := s.client.Do(ctx, req, &users)
if err != nil {
return nil, resp, err
}
@@ -41,7 +44,7 @@ func (s *UsersService) ListFollowers(user string, opt *ListOptions) ([]*User, *R
// string will list people the authenticated user is following.
//
// GitHub API docs: https://developer.github.com/v3/users/followers/#list-users-followed-by-another-user
-func (s *UsersService) ListFollowing(user string, opt *ListOptions) ([]*User, *Response, error) {
+func (s *UsersService) ListFollowing(ctx context.Context, user string, opt *ListOptions) ([]*User, *Response, error) {
var u string
if user != "" {
u = fmt.Sprintf("users/%v/following", user)
@@ -59,7 +62,7 @@ func (s *UsersService) ListFollowing(user string, opt *ListOptions) ([]*User, *R
}
var users []*User
- resp, err := s.client.Do(req, &users)
+ resp, err := s.client.Do(ctx, req, &users)
if err != nil {
return nil, resp, err
}
@@ -71,7 +74,7 @@ func (s *UsersService) ListFollowing(user string, opt *ListOptions) ([]*User, *R
// string for "user" will check if the authenticated user is following "target".
//
// GitHub API docs: https://developer.github.com/v3/users/followers/#check-if-you-are-following-a-user
-func (s *UsersService) IsFollowing(user, target string) (bool, *Response, error) {
+func (s *UsersService) IsFollowing(ctx context.Context, user, target string) (bool, *Response, error) {
var u string
if user != "" {
u = fmt.Sprintf("users/%v/following/%v", user, target)
@@ -84,7 +87,7 @@ func (s *UsersService) IsFollowing(user, target string) (bool, *Response, error)
return false, nil, err
}
- resp, err := s.client.Do(req, nil)
+ resp, err := s.client.Do(ctx, req, nil)
following, err := parseBoolResponse(err)
return following, resp, err
}
@@ -92,25 +95,25 @@ func (s *UsersService) IsFollowing(user, target string) (bool, *Response, error)
// Follow will cause the authenticated user to follow the specified user.
//
// GitHub API docs: https://developer.github.com/v3/users/followers/#follow-a-user
-func (s *UsersService) Follow(user string) (*Response, error) {
+func (s *UsersService) Follow(ctx context.Context, user string) (*Response, error) {
u := fmt.Sprintf("user/following/%v", user)
req, err := s.client.NewRequest("PUT", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
// Unfollow will cause the authenticated user to unfollow the specified user.
//
// GitHub API docs: https://developer.github.com/v3/users/followers/#unfollow-a-user
-func (s *UsersService) Unfollow(user string) (*Response, error) {
+func (s *UsersService) Unfollow(ctx context.Context, user string) (*Response, error) {
u := fmt.Sprintf("user/following/%v", user)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/users_gpg_keys.go b/vendor/github.com/google/go-github/github/users_gpg_keys.go
index 1576365..be88c04 100644
--- a/vendor/github.com/google/go-github/github/users_gpg_keys.go
+++ b/vendor/github.com/google/go-github/github/users_gpg_keys.go
@@ -6,6 +6,7 @@
package github
import (
+ "context"
"fmt"
"time"
)
@@ -39,12 +40,24 @@ type GPGEmail struct {
Verified *bool `json:"verified,omitempty"`
}
-// ListGPGKeys lists the current user's GPG keys. It requires authentication
+// ListGPGKeys lists the public GPG keys for a user. Passing the empty
+// string will fetch keys for the authenticated user. It requires authentication
// via Basic Auth or via OAuth with at least read:gpg_key scope.
-//
-// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#list-your-gpg-keys
-func (s *UsersService) ListGPGKeys() ([]*GPGKey, *Response, error) {
- req, err := s.client.NewRequest("GET", "user/gpg_keys", nil)
+//
+// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#list-gpg-keys-for-a-user
+func (s *UsersService) ListGPGKeys(ctx context.Context, user string, opt *ListOptions) ([]*GPGKey, *Response, error) {
+ var u string
+ if user != "" {
+ u = fmt.Sprintf("users/%v/gpg_keys", user)
+ } else {
+ u = "user/gpg_keys"
+ }
+ u, err := addOptions(u, opt)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err
}
@@ -53,7 +66,7 @@ func (s *UsersService) ListGPGKeys() ([]*GPGKey, *Response, error) {
req.Header.Set("Accept", mediaTypeGitSigningPreview)
var keys []*GPGKey
- resp, err := s.client.Do(req, &keys)
+ resp, err := s.client.Do(ctx, req, &keys)
if err != nil {
return nil, resp, err
}
@@ -65,7 +78,7 @@ func (s *UsersService) ListGPGKeys() ([]*GPGKey, *Response, error) {
// via Basic Auth or via OAuth with at least read:gpg_key scope.
//
// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#get-a-single-gpg-key
-func (s *UsersService) GetGPGKey(id int) (*GPGKey, *Response, error) {
+func (s *UsersService) GetGPGKey(ctx context.Context, id int) (*GPGKey, *Response, error) {
u := fmt.Sprintf("user/gpg_keys/%v", id)
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
@@ -76,7 +89,7 @@ func (s *UsersService) GetGPGKey(id int) (*GPGKey, *Response, error) {
req.Header.Set("Accept", mediaTypeGitSigningPreview)
key := &GPGKey{}
- resp, err := s.client.Do(req, key)
+ resp, err := s.client.Do(ctx, req, key)
if err != nil {
return nil, resp, err
}
@@ -88,7 +101,7 @@ func (s *UsersService) GetGPGKey(id int) (*GPGKey, *Response, error) {
// or OAuth with at least write:gpg_key scope.
//
// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#create-a-gpg-key
-func (s *UsersService) CreateGPGKey(armoredPublicKey string) (*GPGKey, *Response, error) {
+func (s *UsersService) CreateGPGKey(ctx context.Context, armoredPublicKey string) (*GPGKey, *Response, error) {
gpgKey := &struct {
ArmoredPublicKey string `json:"armored_public_key"`
}{ArmoredPublicKey: armoredPublicKey}
@@ -101,7 +114,7 @@ func (s *UsersService) CreateGPGKey(armoredPublicKey string) (*GPGKey, *Response
req.Header.Set("Accept", mediaTypeGitSigningPreview)
key := &GPGKey{}
- resp, err := s.client.Do(req, key)
+ resp, err := s.client.Do(ctx, req, key)
if err != nil {
return nil, resp, err
}
@@ -113,7 +126,7 @@ func (s *UsersService) CreateGPGKey(armoredPublicKey string) (*GPGKey, *Response
// via OAuth with at least admin:gpg_key scope.
//
// GitHub API docs: https://developer.github.com/v3/users/gpg_keys/#delete-a-gpg-key
-func (s *UsersService) DeleteGPGKey(id int) (*Response, error) {
+func (s *UsersService) DeleteGPGKey(ctx context.Context, id int) (*Response, error) {
u := fmt.Sprintf("user/gpg_keys/%v", id)
req, err := s.client.NewRequest("DELETE", u, nil)
if err != nil {
@@ -123,5 +136,5 @@ func (s *UsersService) DeleteGPGKey(id int) (*Response, error) {
// TODO: remove custom Accept header when this API fully launches.
req.Header.Set("Accept", mediaTypeGitSigningPreview)
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
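Beyond the context plumbing, ListGPGKeys changes shape here: it previously listed only the authenticated user's keys, and it now takes a user name (empty string for the authenticated user) plus ListOptions, matching ListKeys. A small sketch; the page size is arbitrary:

```go
package gpgexample

import (
	"context"

	"github.com/google/go-github/github"
)

// publicGPGKeys fetches a user's public GPG keys; pass "" to list the
// authenticated user's own keys, as the new doc comment describes.
func publicGPGKeys(ctx context.Context, client *github.Client, user string) ([]*github.GPGKey, error) {
	keys, _, err := client.Users.ListGPGKeys(ctx, user, &github.ListOptions{PerPage: 100})
	return keys, err
}
```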
diff --git a/vendor/github.com/google/go-github/github/users_keys.go b/vendor/github.com/google/go-github/github/users_keys.go
index ebc333f..97ed4b8 100644
--- a/vendor/github.com/google/go-github/github/users_keys.go
+++ b/vendor/github.com/google/go-github/github/users_keys.go
@@ -5,7 +5,10 @@
package github
-import "fmt"
+import (
+ "context"
+ "fmt"
+)
// Key represents a public SSH key used to authenticate a user or deploy script.
type Key struct {
@@ -24,7 +27,7 @@ func (k Key) String() string {
// string will fetch keys for the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/users/keys/#list-public-keys-for-a-user
-func (s *UsersService) ListKeys(user string, opt *ListOptions) ([]*Key, *Response, error) {
+func (s *UsersService) ListKeys(ctx context.Context, user string, opt *ListOptions) ([]*Key, *Response, error) {
var u string
if user != "" {
u = fmt.Sprintf("users/%v/keys", user)
@@ -42,7 +45,7 @@ func (s *UsersService) ListKeys(user string, opt *ListOptions) ([]*Key, *Respons
}
var keys []*Key
- resp, err := s.client.Do(req, &keys)
+ resp, err := s.client.Do(ctx, req, &keys)
if err != nil {
return nil, resp, err
}
@@ -53,7 +56,7 @@ func (s *UsersService) ListKeys(user string, opt *ListOptions) ([]*Key, *Respons
// GetKey fetches a single public key.
//
// GitHub API docs: https://developer.github.com/v3/users/keys/#get-a-single-public-key
-func (s *UsersService) GetKey(id int) (*Key, *Response, error) {
+func (s *UsersService) GetKey(ctx context.Context, id int) (*Key, *Response, error) {
u := fmt.Sprintf("user/keys/%v", id)
req, err := s.client.NewRequest("GET", u, nil)
@@ -62,7 +65,7 @@ func (s *UsersService) GetKey(id int) (*Key, *Response, error) {
}
key := new(Key)
- resp, err := s.client.Do(req, key)
+ resp, err := s.client.Do(ctx, req, key)
if err != nil {
return nil, resp, err
}
@@ -73,7 +76,7 @@ func (s *UsersService) GetKey(id int) (*Key, *Response, error) {
// CreateKey adds a public key for the authenticated user.
//
// GitHub API docs: https://developer.github.com/v3/users/keys/#create-a-public-key
-func (s *UsersService) CreateKey(key *Key) (*Key, *Response, error) {
+func (s *UsersService) CreateKey(ctx context.Context, key *Key) (*Key, *Response, error) {
u := "user/keys"
req, err := s.client.NewRequest("POST", u, key)
@@ -82,7 +85,7 @@ func (s *UsersService) CreateKey(key *Key) (*Key, *Response, error) {
}
k := new(Key)
- resp, err := s.client.Do(req, k)
+ resp, err := s.client.Do(ctx, req, k)
if err != nil {
return nil, resp, err
}
@@ -93,7 +96,7 @@ func (s *UsersService) CreateKey(key *Key) (*Key, *Response, error) {
// DeleteKey deletes a public key.
//
// GitHub API docs: https://developer.github.com/v3/users/keys/#delete-a-public-key
-func (s *UsersService) DeleteKey(id int) (*Response, error) {
+func (s *UsersService) DeleteKey(ctx context.Context, id int) (*Response, error) {
u := fmt.Sprintf("user/keys/%v", id)
req, err := s.client.NewRequest("DELETE", u, nil)
@@ -101,5 +104,5 @@ func (s *UsersService) DeleteKey(id int) (*Response, error) {
return nil, err
}
- return s.client.Do(req, nil)
+ return s.client.Do(ctx, req, nil)
}
diff --git a/vendor/github.com/google/go-github/github/without_appengine.go b/vendor/github.com/google/go-github/github/without_appengine.go
new file mode 100644
index 0000000..b0edc04
--- /dev/null
+++ b/vendor/github.com/google/go-github/github/without_appengine.go
@@ -0,0 +1,19 @@
+// Copyright 2017 The go-github AUTHORS. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !appengine
+
+// This file provides glue for making github work without App Engine.
+
+package github
+
+import (
+ "context"
+ "net/http"
+)
+
+func withContext(ctx context.Context, req *http.Request) (context.Context, *http.Request) {
+ return ctx, req.WithContext(ctx)
+}
diff --git a/vendor/github.com/googleapis/gax-go/call_option.go b/vendor/github.com/googleapis/gax-go/call_option.go
index 4ba1cdf..536cb8c 100644
--- a/vendor/github.com/googleapis/gax-go/call_option.go
+++ b/vendor/github.com/googleapis/gax-go/call_option.go
@@ -129,8 +129,21 @@ func (bo *Backoff) Pause() time.Duration {
return d
}
+type grpcOpt []grpc.CallOption
+
+func (o grpcOpt) Resolve(s *CallSettings) {
+ s.GRPC = o
+}
+
+func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
+ return grpcOpt(append([]grpc.CallOption(nil), opt...))
+}
+
type CallSettings struct {
// Retry returns a Retryer to be used to control retry logic of a method call.
// If Retry is nil or the returned Retryer is nil, the call will not be retried.
Retry func() Retryer
+
+ // CallOptions to be forwarded to GRPC.
+ GRPC []grpc.CallOption
}
diff --git a/vendor/github.com/googleapis/gax-go/header.go b/vendor/github.com/googleapis/gax-go/header.go
new file mode 100644
index 0000000..d81455e
--- /dev/null
+++ b/vendor/github.com/googleapis/gax-go/header.go
@@ -0,0 +1,24 @@
+package gax
+
+import "bytes"
+
+// XGoogHeader is for use by the Google Cloud Libraries only.
+//
+// XGoogHeader formats key-value pairs.
+// The resulting string is suitable for x-goog-api-client header.
+func XGoogHeader(keyval ...string) string {
+ if len(keyval) == 0 {
+ return ""
+ }
+ if len(keyval)%2 != 0 {
+ panic("gax.Header: odd argument count")
+ }
+ var buf bytes.Buffer
+ for i := 0; i < len(keyval); i += 2 {
+ buf.WriteByte(' ')
+ buf.WriteString(keyval[i])
+ buf.WriteByte('/')
+ buf.WriteString(keyval[i+1])
+ }
+ return buf.String()[1:]
+}
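XGoogHeader space-joins its pairs, with a slash inside each pair, and panics on an odd argument count. A tiny illustration with made-up version strings:

```go
package gaxexample

import gax "github.com/googleapis/gax-go"

// header is suitable for the x-goog-api-client request header.
var header = gax.XGoogHeader("gl-go", "1.8.1", "gax", "0.1.0")
// header == "gl-go/1.8.1 gax/0.1.0"
```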
diff --git a/vendor/github.com/googleapis/gax-go/invoke.go b/vendor/github.com/googleapis/gax-go/invoke.go
index d2134e1..86049d8 100644
--- a/vendor/github.com/googleapis/gax-go/invoke.go
+++ b/vendor/github.com/googleapis/gax-go/invoke.go
@@ -36,7 +36,7 @@ import (
)
// A user defined call stub.
-type APICall func(context.Context) error
+type APICall func(context.Context, CallSettings) error
// Invoke calls the given APICall,
// performing retries as specified by opts, if any.
@@ -67,7 +67,7 @@ type sleeper func(ctx context.Context, d time.Duration) error
func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error {
var retryer Retryer
for {
- err := call(ctx)
+ err := call(ctx, settings)
if err == nil {
return nil
}
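The call_option.go and invoke.go hunks above work as a pair: CallSettings now carries per-call grpc.CallOptions (set via WithGRPCOptions), and APICall receives the resolved settings so it can forward them to the stub. A minimal sketch under that assumption; the fetcher interface stands in for a generated gRPC client:

```go
package gaxexample

import (
	"context"

	gax "github.com/googleapis/gax-go"
	"google.golang.org/grpc"
)

// fetcher stands in for a generated gRPC client method.
type fetcher interface {
	Fetch(ctx context.Context, req interface{}, opts ...grpc.CallOption) error
}

// fetchWithOptions forwards the CallOptions resolved from WithGRPCOptions to
// the stub on every attempt, including retries.
func fetchWithOptions(ctx context.Context, stub fetcher, req interface{}) error {
	return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		return stub.Fetch(ctx, req, settings.GRPC...)
	}, gax.WithGRPCOptions(grpc.FailFast(false)))
}
```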
diff --git a/vendor/github.com/gorilla/csrf/README.md b/vendor/github.com/gorilla/csrf/README.md
index 8cad716..75e8525 100644
--- a/vendor/github.com/gorilla/csrf/README.md
+++ b/vendor/github.com/gorilla/csrf/README.md
@@ -1,5 +1,5 @@
# gorilla/csrf
-[![GoDoc](https://godoc.org/github.com/gorilla/csrf?status.svg)](https://godoc.org/github.com/gorilla/csrf) [![Build Status](https://travis-ci.org/gorilla/csrf.svg?branch=master)](https://travis-ci.org/gorilla/csrf)
+[![GoDoc](https://godoc.org/github.com/gorilla/csrf?status.svg)](https://godoc.org/github.com/gorilla/csrf) [![Build Status](https://travis-ci.org/gorilla/csrf.svg?branch=master)](https://travis-ci.org/gorilla/csrf) [![Sourcegraph](https://sourcegraph.com/github.com/gorilla/csrf/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/csrf?badge)
gorilla/csrf is a HTTP middleware library that provides [cross-site request
forgery](http://blog.codinghorror.com/preventing-csrf-and-xsrf-attacks/) (CSRF)
diff --git a/vendor/github.com/gorilla/csrf/doc.go b/vendor/github.com/gorilla/csrf/doc.go
index e0bf408..301abe0 100644
--- a/vendor/github.com/gorilla/csrf/doc.go
+++ b/vendor/github.com/gorilla/csrf/doc.go
@@ -135,6 +135,10 @@ providing a JSON API:
w.Write(b)
}
+If you're writing a client that's supposed to mimic browser behavior, make sure to
+send back the CSRF cookie (the default name is _gorilla_csrf, but this can be changed
+with the CookieName Option) along with either the X-CSRF-Token header or the gorilla.csrf.Token form field.
+
In addition: getting CSRF protection right is important, so here's some background:
* This library generates unique-per-request (masked) tokens as a mitigation
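The paragraph added to doc.go above is a how-to for non-browser clients; a rough sketch of that flow in Go, assuming the server exposes the token in an X-CSRF-Token response header (as the package's JSON API example does). The URLs are placeholders:

```go
package csrfclient

import (
	"net/http"
	"net/http/cookiejar"
	"net/url"
	"strings"
)

// submit mimics browser behaviour: the cookie jar returns the _gorilla_csrf
// cookie automatically, and the matching token is sent in X-CSRF-Token.
func submit(base string, form url.Values) error {
	jar, err := cookiejar.New(nil)
	if err != nil {
		return err
	}
	client := &http.Client{Jar: jar}

	// First request: pick up the CSRF cookie and a token for this session.
	resp, err := client.Get(base + "/form")
	if err != nil {
		return err
	}
	token := resp.Header.Get("X-CSRF-Token")
	resp.Body.Close()

	// Second request: the jar sends the cookie back; the header carries the token.
	req, err := http.NewRequest("POST", base+"/submit", strings.NewReader(form.Encode()))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	req.Header.Set("X-CSRF-Token", token)
	resp, err = client.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}
```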
diff --git a/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/gorilla/handlers/README.md
index a782c41..4a6895d 100644
--- a/vendor/github.com/gorilla/handlers/README.md
+++ b/vendor/github.com/gorilla/handlers/README.md
@@ -1,6 +1,8 @@
gorilla/handlers
================
[![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) [![Build Status](https://travis-ci.org/gorilla/handlers.svg?branch=master)](https://travis-ci.org/gorilla/handlers)
+[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/handlers/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/handlers?badge)
+
Package handlers is a collection of handlers (aka "HTTP middleware") for use
with Go's `net/http` package (or any framework supporting `http.Handler`), including:
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
index 94d396c..cdab878 100644
--- a/vendor/github.com/gorilla/mux/README.md
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -2,6 +2,7 @@ gorilla/mux
===
[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux)
+[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
![Gorilla Logo](http://www.gorillatoolkit.org/static/images/gorilla-icon-64.png)
diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go
index 9221915..5544c1f 100644
--- a/vendor/github.com/gorilla/mux/route.go
+++ b/vendor/github.com/gorilla/mux/route.go
@@ -153,7 +153,7 @@ func (r *Route) addRegexpMatcher(tpl string, matchHost, matchPrefix, matchQuery
}
r.regexp = r.getRegexpGroup()
if !matchHost && !matchQuery {
- if tpl == "/" && (len(tpl) == 0 || tpl[0] != '/') {
+ if len(tpl) > 0 && tpl[0] != '/' {
return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
}
if r.regexp.path != nil {
diff --git a/vendor/github.com/gorilla/securecookie/README.md b/vendor/github.com/gorilla/securecookie/README.md
index da112e4..aa7bd1a 100644
--- a/vendor/github.com/gorilla/securecookie/README.md
+++ b/vendor/github.com/gorilla/securecookie/README.md
@@ -1,6 +1,8 @@
securecookie
============
[![GoDoc](https://godoc.org/github.com/gorilla/securecookie?status.svg)](https://godoc.org/github.com/gorilla/securecookie) [![Build Status](https://travis-ci.org/gorilla/securecookie.png?branch=master)](https://travis-ci.org/gorilla/securecookie)
+[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/securecookie/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/securecookie?badge)
+
securecookie encodes and decodes authenticated and optionally encrypted
cookie values.
diff --git a/vendor/github.com/gorilla/sessions/README.md b/vendor/github.com/gorilla/sessions/README.md
index 5bb3107..ebc60d0 100644
--- a/vendor/github.com/gorilla/sessions/README.md
+++ b/vendor/github.com/gorilla/sessions/README.md
@@ -1,6 +1,8 @@
sessions
========
[![GoDoc](https://godoc.org/github.com/gorilla/sessions?status.svg)](https://godoc.org/github.com/gorilla/sessions) [![Build Status](https://travis-ci.org/gorilla/sessions.png?branch=master)](https://travis-ci.org/gorilla/sessions)
+[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/sessions/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/sessions?badge)
+
gorilla/sessions provides cookie and filesystem sessions and infrastructure for
custom session backends.
@@ -52,6 +54,12 @@ with
as or else you will leak memory! An easy way to do this is to wrap the top-level
mux when calling http.ListenAndServe:
+```go
+ http.ListenAndServe(":8080", context.ClearHandler(http.DefaultServeMux))
+```
+
+The ClearHandler function is provided by the gorilla/context package.
+
More examples are available [on the Gorilla
website](http://www.gorillatoolkit.org/pkg/sessions).
@@ -63,6 +71,7 @@ Other implementations of the `sessions.Store` interface:
* [github.com/yosssi/boltstore](https://github.com/yosssi/boltstore) - Bolt
* [github.com/srinathgs/couchbasestore](https://github.com/srinathgs/couchbasestore) - Couchbase
* [github.com/denizeren/dynamostore](https://github.com/denizeren/dynamostore) - Dynamodb on AWS
+* [github.com/savaki/dynastore](https://github.com/savaki/dynastore) - DynamoDB on AWS (Official AWS library)
* [github.com/bradleypeabody/gorilla-sessions-memcache](https://github.com/bradleypeabody/gorilla-sessions-memcache) - Memcache
* [github.com/dsoprea/go-appengine-sessioncascade](https://github.com/dsoprea/go-appengine-sessioncascade) - Memcache/Datastore/Context in AppEngine
* [github.com/kidstuff/mongostore](https://github.com/kidstuff/mongostore) - MongoDB
diff --git a/vendor/github.com/hashicorp/vault/api/client.go b/vendor/github.com/hashicorp/vault/api/client.go
index fa06d46..5f8a6f6 100644
--- a/vendor/github.com/hashicorp/vault/api/client.go
+++ b/vendor/github.com/hashicorp/vault/api/client.go
@@ -11,6 +11,8 @@ import (
"sync"
"time"
+ "golang.org/x/net/http2"
+
"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-rootcerts"
"github.com/sethgrid/pester"
@@ -25,6 +27,7 @@ const EnvVaultInsecure = "VAULT_SKIP_VERIFY"
const EnvVaultTLSServerName = "VAULT_TLS_SERVER_NAME"
const EnvVaultWrapTTL = "VAULT_WRAP_TTL"
const EnvVaultMaxRetries = "VAULT_MAX_RETRIES"
+const EnvVaultToken = "VAULT_TOKEN"
// WrappingLookupFunc is a function that, given an HTTP verb and a path,
// returns an optional string duration to be used for response wrapping (e.g.
@@ -84,8 +87,7 @@ type TLSConfig struct {
// setting the `VAULT_ADDR` environment variable.
func DefaultConfig() *Config {
config := &Config{
- Address: "https://127.0.0.1:8200",
-
+ Address: "https://127.0.0.1:8200",
HttpClient: cleanhttp.DefaultClient(),
}
config.HttpClient.Timeout = time.Second * 60
@@ -104,7 +106,6 @@ func DefaultConfig() *Config {
// ConfigureTLS takes a set of TLS configurations and applies those to the the HTTP client.
func (c *Config) ConfigureTLS(t *TLSConfig) error {
-
if c.HttpClient == nil {
c.HttpClient = DefaultConfig().HttpClient
}
@@ -247,6 +248,11 @@ func NewClient(c *Config) (*Client, error) {
c.HttpClient = DefaultConfig().HttpClient
}
+ tp := c.HttpClient.Transport.(*http.Transport)
+ if err := http2.ConfigureTransport(tp); err != nil {
+ return nil, err
+ }
+
redirFunc := func() {
// Ensure redirects are not automatically followed
// Note that this is sane for the API client as it has its own
@@ -254,9 +260,9 @@ func NewClient(c *Config) (*Client, error) {
// but in e.g. http_test actual redirect handling is necessary
c.HttpClient.CheckRedirect = func(req *http.Request, via []*http.Request) error {
// Returning this value causes the Go net library to not close the
- // response body and nil out the error. Otherwise pester tries
+ // response body and to nil out the error. Otherwise pester tries
// three times on every redirect because it sees an error from this
- // function being passed through.
+ // function (to prevent redirects) passing through to it.
return http.ErrUseLastResponse
}
}
@@ -268,7 +274,7 @@ func NewClient(c *Config) (*Client, error) {
config: c,
}
- if token := os.Getenv("VAULT_TOKEN"); token != "" {
+ if token := os.Getenv(EnvVaultToken); token != "" {
client.SetToken(token)
}
@@ -292,6 +298,11 @@ func (c *Client) Address() string {
return c.addr.String()
}
+// SetMaxRetries sets the number of retries that will be used in the case of certain errors
+func (c *Client) SetMaxRetries(retries int) {
+ c.config.MaxRetries = retries
+}
+
// SetWrappingLookupFunc sets a lookup function that returns desired wrap TTLs
// for a given operation and path
func (c *Client) SetWrappingLookupFunc(lookupFunc WrappingLookupFunc) {
@@ -322,6 +333,7 @@ func (c *Client) NewRequest(method, path string) *Request {
req := &Request{
Method: method,
URL: &url.URL{
+ User: c.addr.User,
Scheme: c.addr.Scheme,
Host: c.addr.Host,
Path: path,
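Summing up the client.go changes: the default transport is upgraded to HTTP/2, the VAULT_TOKEN lookup goes through a named EnvVaultToken constant, a SetMaxRetries setter is added, and userinfo from the base URL is now carried onto requests. A small sketch of the setter; the retry count is an arbitrary example:

```go
package vaultexample

import vaultapi "github.com/hashicorp/vault/api"

// newClient builds a client with the library defaults (VAULT_TOKEN is picked
// up automatically) and overrides the retry count at runtime.
func newClient() (*vaultapi.Client, error) {
	client, err := vaultapi.NewClient(vaultapi.DefaultConfig())
	if err != nil {
		return nil, err
	}
	client.SetMaxRetries(5)
	return client, nil
}
```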
diff --git a/vendor/github.com/hashicorp/vault/api/request.go b/vendor/github.com/hashicorp/vault/api/request.go
index 8f22dd5..685e2d7 100644
--- a/vendor/github.com/hashicorp/vault/api/request.go
+++ b/vendor/github.com/hashicorp/vault/api/request.go
@@ -55,6 +55,7 @@ func (r *Request) ToHTTP() (*http.Request, error) {
return nil, err
}
+ req.URL.User = r.URL.User
req.URL.Scheme = r.URL.Scheme
req.URL.Host = r.URL.Host
req.Host = r.URL.Host
diff --git a/vendor/github.com/hashicorp/vault/api/sys_mounts.go b/vendor/github.com/hashicorp/vault/api/sys_mounts.go
index 768e09f..907fddb 100644
--- a/vendor/github.com/hashicorp/vault/api/sys_mounts.go
+++ b/vendor/github.com/hashicorp/vault/api/sys_mounts.go
@@ -129,6 +129,7 @@ type MountInput struct {
type MountConfigInput struct {
DefaultLeaseTTL string `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
MaxLeaseTTL string `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
}
type MountOutput struct {
@@ -139,6 +140,7 @@ type MountOutput struct {
}
type MountConfigOutput struct {
- DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
- MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ DefaultLeaseTTL int `json:"default_lease_ttl" structs:"default_lease_ttl" mapstructure:"default_lease_ttl"`
+ MaxLeaseTTL int `json:"max_lease_ttl" structs:"max_lease_ttl" mapstructure:"max_lease_ttl"`
+ ForceNoCache bool `json:"force_no_cache" structs:"force_no_cache" mapstructure:"force_no_cache"`
}
diff --git a/vendor/github.com/homemade/scl/scope.go b/vendor/github.com/homemade/scl/scope.go
index e8105ae..e8105ae 100755..100644
--- a/vendor/github.com/homemade/scl/scope.go
+++ b/vendor/github.com/homemade/scl/scope.go
diff --git a/vendor/github.com/homemade/scl/tokeniser.go b/vendor/github.com/homemade/scl/tokeniser.go
index 684c7dd..0986ef8 100644
--- a/vendor/github.com/homemade/scl/tokeniser.go
+++ b/vendor/github.com/homemade/scl/tokeniser.go
@@ -32,7 +32,7 @@ func (t *tokeniser) resetComment() {
func (t *tokeniser) stripComments(l *scannerLine) string {
- lastQuote := rune(0)
+ lastQuote := []rune{rune(0)}
slash := rune(47)
slashCount := 0
@@ -43,15 +43,15 @@ func (t *tokeniser) stripComments(l *scannerLine) string {
c := rune(v)
switch {
- case c == lastQuote:
- lastQuote = rune(0)
+ case c == lastQuote[0]:
+ lastQuote = lastQuote[1:]
slashCount = 0
case unicode.In(c, unicode.Quotation_Mark):
- lastQuote = c
+ lastQuote = append([]rune{c}, lastQuote...)
slashCount = 0
- case c == slash && lastQuote == rune(0):
+ case c == slash && lastQuote[0] == rune(0):
slashCount++
if slashCount == 2 {
diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go
index 53659bc..10f7bdf 100644
--- a/vendor/github.com/jmoiron/sqlx/bind.go
+++ b/vendor/github.com/jmoiron/sqlx/bind.go
@@ -43,27 +43,28 @@ func Rebind(bindType int, query string) string {
return query
}
- qb := []byte(query)
// Add space enough for 10 params before we have to allocate
- rqb := make([]byte, 0, len(qb)+10)
- j := 1
- for _, b := range qb {
- if b == '?' {
- switch bindType {
- case DOLLAR:
- rqb = append(rqb, '$')
- case NAMED:
- rqb = append(rqb, ':', 'a', 'r', 'g')
- }
- for _, b := range strconv.Itoa(j) {
- rqb = append(rqb, byte(b))
- }
- j++
- } else {
- rqb = append(rqb, b)
+ rqb := make([]byte, 0, len(query)+10)
+
+ var i, j int
+
+ for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") {
+ rqb = append(rqb, query[:i]...)
+
+ switch bindType {
+ case DOLLAR:
+ rqb = append(rqb, '$')
+ case NAMED:
+ rqb = append(rqb, ':', 'a', 'r', 'g')
}
+
+ j++
+ rqb = strconv.AppendInt(rqb, int64(j), 10)
+
+ query = query[i+1:]
}
- return string(rqb)
+
+ return string(append(rqb, query...))
}
// Experimental implementation of Rebind which uses a bytes.Buffer. The code is
@@ -135,9 +136,9 @@ func In(query string, args ...interface{}) (string, []interface{}, error) {
}
newArgs := make([]interface{}, 0, flatArgsCount)
+ buf := bytes.NewBuffer(make([]byte, 0, len(query)+len(", ?")*flatArgsCount))
var arg, offset int
- var buf bytes.Buffer
for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') {
if arg >= len(meta) {
@@ -163,13 +164,12 @@ func In(query string, args ...interface{}) (string, []interface{}, error) {
// write everything up to and including our ? character
buf.WriteString(query[:offset+i+1])
- newArgs = append(newArgs, argMeta.v.Index(0).Interface())
-
for si := 1; si < argMeta.length; si++ {
buf.WriteString(", ?")
- newArgs = append(newArgs, argMeta.v.Index(si).Interface())
}
+ newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length)
+
// slice the query and reset the offset. this avoids some bookkeeping for
// the write after the loop
query = query[offset+i+1:]
@@ -184,3 +184,24 @@ func In(query string, args ...interface{}) (string, []interface{}, error) {
return buf.String(), newArgs, nil
}
+
+func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} {
+ switch val := v.Interface().(type) {
+ case []interface{}:
+ args = append(args, val...)
+ case []int:
+ for i := range val {
+ args = append(args, val[i])
+ }
+ case []string:
+ for i := range val {
+ args = append(args, val[i])
+ }
+ default:
+ for si := 0; si < vlen; si++ {
+ args = append(args, v.Index(si).Interface())
+ }
+ }
+
+ return args
+}
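The Rebind rewrite above swaps the byte-by-byte copy for strings.Index plus strconv.AppendInt, and In now pre-sizes its buffer and flattens slice arguments through the new appendReflectSlice helper; observable behaviour should be unchanged. A sketch of what both helpers produce:

```go
package sqlxexample

import "github.com/jmoiron/sqlx"

// rebindAndExpand shows the (unchanged) output of the rewritten helpers.
func rebindAndExpand() (string, string, []interface{}, error) {
	// "?" placeholders become "$1", "$2", ... for DOLLAR bindvars.
	q := sqlx.Rebind(sqlx.DOLLAR, "SELECT * FROM users WHERE id = ? AND name = ?")
	// q == "SELECT * FROM users WHERE id = $1 AND name = $2"

	// In expands a slice argument into one "?" per element.
	inq, args, err := sqlx.In("SELECT * FROM items WHERE id IN (?)", []int{1, 2, 3})
	// inq == "SELECT * FROM items WHERE id IN (?, ?, ?)"; args holds 1, 2, 3.
	return q, inq, args, err
}
```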
diff --git a/vendor/github.com/jmoiron/sqlx/named_context.go b/vendor/github.com/jmoiron/sqlx/named_context.go
new file mode 100644
index 0000000..9405007
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/named_context.go
@@ -0,0 +1,132 @@
+// +build go1.8
+
+package sqlx
+
+import (
+ "context"
+ "database/sql"
+)
+
+// A union interface of contextPreparer and binder, required to be able to
+// prepare named statements with context (as the bindtype must be determined).
+type namedPreparerContext interface {
+ PreparerContext
+ binder
+}
+
+func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) {
+ bindType := BindType(p.DriverName())
+ q, args, err := compileNamedQuery([]byte(query), bindType)
+ if err != nil {
+ return nil, err
+ }
+ stmt, err := PreparexContext(ctx, p, q)
+ if err != nil {
+ return nil, err
+ }
+ return &NamedStmt{
+ QueryString: q,
+ Params: args,
+ Stmt: stmt,
+ }, nil
+}
+
+// ExecContext executes a named statement using the struct passed.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return *new(sql.Result), err
+ }
+ return n.Stmt.ExecContext(ctx, args...)
+}
+
+// QueryContext executes a named statement using the struct argument, returning rows.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return nil, err
+ }
+ return n.Stmt.QueryContext(ctx, args...)
+}
+
+// QueryRowContext executes a named statement against the database. Because sqlx cannot
+// create a *sql.Row with an error condition pre-set for binding errors, sqlx
+// returns a *sqlx.Row instead.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row {
+ args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+ if err != nil {
+ return &Row{err: err}
+ }
+ return n.Stmt.QueryRowxContext(ctx, args...)
+}
+
+// MustExecContext execs a NamedStmt, panicking on error
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result {
+ res, err := n.ExecContext(ctx, arg)
+ if err != nil {
+ panic(err)
+ }
+ return res
+}
+
+// QueryxContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) {
+ r, err := n.QueryContext(ctx, arg)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
+}
+
+// QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is
+// an alias for QueryRow.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row {
+ return n.QueryRowContext(ctx, arg)
+}
+
+// SelectContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error {
+ rows, err := n.QueryxContext(ctx, arg)
+ if err != nil {
+ return err
+ }
+ // if something happens here, we want to make sure the rows are Closed
+ defer rows.Close()
+ return scanAll(rows, dest, false)
+}
+
+// GetContext using this NamedStmt
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error {
+ r := n.QueryRowxContext(ctx, arg)
+ return r.scanAny(dest, false)
+}
+
+// NamedQueryContext binds a named query and then runs Query on the result using the
+// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
+// map[string]interface{} types.
+func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) {
+ q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+ if err != nil {
+ return nil, err
+ }
+ return e.QueryxContext(ctx, q, args...)
+}
+
+// NamedExecContext uses BindStruct to get a query executable by the driver and
+// then runs Exec on the result. Returns an error from the binding
+// or the query execution itself.
+func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) {
+ q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+ if err != nil {
+ return nil, err
+ }
+ return e.ExecContext(ctx, q, args...)
+}
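named_context.go (guarded by the go1.8 build tag) adds context-aware variants of the named-query API. A minimal sketch of NamedExecContext; the table and column are placeholders, and a struct with matching db tags binds the same way as the map:

```go
package sqlxexample

import (
	"context"

	"github.com/jmoiron/sqlx"
)

// insertPerson binds :name from the map and executes with the caller's context.
func insertPerson(ctx context.Context, db *sqlx.DB, name string) error {
	_, err := sqlx.NamedExecContext(ctx, db,
		`INSERT INTO person (name) VALUES (:name)`,
		map[string]interface{}{"name": name})
	return err
}
```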
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
index 427ed2a..f2802b8 100644
--- a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
@@ -127,7 +127,7 @@ func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value {
return r
}
-// FieldByName returns a field by the its mapped name as a reflect.Value.
+// FieldByName returns a field by its mapped name as a reflect.Value.
// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind.
// Returns zero Value if the name is not found.
func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value {
diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go
new file mode 100644
index 0000000..0173056
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/sqlx_context.go
@@ -0,0 +1,329 @@
+// +build go1.8
+
+package sqlx
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "reflect"
+)
+
+// ConnectContext to a database and verify with a ping.
+func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) {
+ db, err := Open(driverName, dataSourceName)
+ if err != nil {
+ return db, err
+ }
+ err = db.PingContext(ctx)
+ return db, err
+}
+
+// QueryerContext is an interface used by GetContext and SelectContext
+type QueryerContext interface {
+ QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
+ QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error)
+ QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row
+}
+
+// PreparerContext is an interface used by PreparexContext.
+type PreparerContext interface {
+ PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
+}
+
+// ExecerContext is an interface used by MustExecContext and LoadFileContext
+type ExecerContext interface {
+ ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
+}
+
+// ExtContext is a union interface which can bind, query, and exec, with Context
+// used by NamedQueryContext and NamedExecContext.
+type ExtContext interface {
+ binder
+ QueryerContext
+ ExecerContext
+}
+
+// SelectContext executes a query using the provided Queryer, and StructScans
+// each row into dest, which must be a slice. If the slice elements are
+// scannable, then the result set must have only one column. Otherwise,
+// StructScan is used. The *sql.Rows are closed automatically.
+// Any placeholder parameters are replaced with supplied args.
+func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error {
+ rows, err := q.QueryxContext(ctx, query, args...)
+ if err != nil {
+ return err
+ }
+ // if something happens here, we want to make sure the rows are Closed
+ defer rows.Close()
+ return scanAll(rows, dest, false)
+}
+
+// PreparexContext prepares a statement.
+//
+// The provided context is used for the preparation of the statement, not for
+// the execution of the statement.
+func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) {
+ s, err := p.PrepareContext(ctx, query)
+ if err != nil {
+ return nil, err
+ }
+ return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err
+}
+
+// GetContext does a QueryRow using the provided Queryer, and scans the
+// resulting row to dest. If dest is scannable, the result must only have one
+// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like
+// row.Scan would. Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error {
+ r := q.QueryRowxContext(ctx, query, args...)
+ return r.scanAny(dest, false)
+}
+
+// LoadFileContext exec's every statement in a file (as a single call to Exec).
+// LoadFileContext may return a nil *sql.Result if errors are encountered
+// locating or reading the file at path. LoadFile reads the entire file into
+// memory, so it is not suitable for loading large data dumps, but can be useful
+// for initializing schemas or loading indexes.
+//
+// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3
+// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting
+// this by requiring something with DriverName() and then attempting to split the
+// queries will be difficult to get right, and its current driver-specific behavior
+// is deemed at least not complex in its incorrectness.
+func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) {
+ realpath, err := filepath.Abs(path)
+ if err != nil {
+ return nil, err
+ }
+ contents, err := ioutil.ReadFile(realpath)
+ if err != nil {
+ return nil, err
+ }
+ res, err := e.ExecContext(ctx, string(contents))
+ return &res, err
+}
+
+// MustExecContext execs the query using e and panics if there was an error.
+// Any placeholder parameters are replaced with supplied args.
+func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result {
+ res, err := e.ExecContext(ctx, query, args...)
+ if err != nil {
+ panic(err)
+ }
+ return res
+}
+
+// PrepareNamedContext returns an sqlx.NamedStmt
+func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) {
+ return prepareNamedContext(ctx, db, query)
+}
+
+// NamedQueryContext using this DB.
+// Any named placeholder parameters are replaced with fields from arg.
+func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) {
+ return NamedQueryContext(ctx, db, query, arg)
+}
+
+// NamedExecContext using this DB.
+// Any named placeholder parameters are replaced with fields from arg.
+func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) {
+ return NamedExecContext(ctx, db, query, arg)
+}
+
+// SelectContext using this DB.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return SelectContext(ctx, db, dest, query, args...)
+}
+
+// GetContext using this DB.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return GetContext(ctx, db, dest, query, args...)
+}
+
+// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt.
+//
+// The provided context is used for the preparation of the statement, not for
+// the execution of the statement.
+func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) {
+ return PreparexContext(ctx, db, query)
+}
+
+// QueryxContext queries the database and returns an *sqlx.Rows.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
+ r, err := db.DB.QueryContext(ctx, query, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err
+}
+
+// QueryRowxContext queries the database and returns an *sqlx.Row.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
+ rows, err := db.DB.QueryContext(ctx, query, args...)
+ return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper}
+}
+
+// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead
+// of an *sql.Tx.
+//
+// The provided context is used until the transaction is committed or rolled
+// back. If the context is canceled, the sql package will roll back the
+// transaction. Tx.Commit will return an error if the context provided to
+// MustBeginContext is canceled.
+func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx {
+ tx, err := db.BeginTxx(ctx, opts)
+ if err != nil {
+ panic(err)
+ }
+ return tx
+}
+
+// MustExecContext (panic) runs MustExec using this database.
+// Any placeholder parameters are replaced with supplied args.
+func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
+ return MustExecContext(ctx, db, query, args...)
+}
+
+// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an
+// *sql.Tx.
+//
+// The provided context is used until the transaction is committed or rolled
+// back. If the context is canceled, the sql package will roll back the
+// transaction. Tx.Commit will return an error if the context provided to
+// BeginxContext is canceled.
+func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
+ tx, err := db.DB.BeginTx(ctx, opts)
+ if err != nil {
+ return nil, err
+ }
+ return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err
+}
+
+// StmtxContext returns a version of the prepared statement which runs within a
+// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt.
+func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt {
+ var s *sql.Stmt
+ switch v := stmt.(type) {
+ case Stmt:
+ s = v.Stmt
+ case *Stmt:
+ s = v.Stmt
+ case sql.Stmt:
+ s = &v
+ case *sql.Stmt:
+ s = v
+ default:
+ panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type()))
+ }
+ return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper}
+}
+
+// NamedStmtContext returns a version of the prepared statement which runs
+// within a transaction.
+func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt {
+ return &NamedStmt{
+ QueryString: stmt.QueryString,
+ Params: stmt.Params,
+ Stmt: tx.StmtxContext(ctx, stmt.Stmt),
+ }
+}
+
+// MustExecContext runs MustExecContext within a transaction.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result {
+ return MustExecContext(ctx, tx, query, args...)
+}
+
+// QueryxContext within a transaction and context.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
+ r, err := tx.Tx.QueryContext(ctx, query, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err
+}
+
+// SelectContext within a transaction and context.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return SelectContext(ctx, tx, dest, query, args...)
+}
+
+// GetContext within a transaction and context.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error {
+ return GetContext(ctx, tx, dest, query, args...)
+}
+
+// QueryRowxContext within a transaction and context.
+// Any placeholder parameters are replaced with supplied args.
+func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
+ rows, err := tx.Tx.QueryContext(ctx, query, args...)
+ return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper}
+}
+
+// SelectContext using the prepared statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error {
+ return SelectContext(ctx, &qStmt{s}, dest, "", args...)
+}
+
+// GetContext using the prepared statement.
+// Any placeholder parameters are replaced with supplied args.
+// An error is returned if the result set is empty.
+func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error {
+ return GetContext(ctx, &qStmt{s}, dest, "", args...)
+}
+
+// MustExecContext (panic) using this statement. Note that the query portion of
+// the error output will be blank, as Stmt does not expose its query.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result {
+ return MustExecContext(ctx, &qStmt{s}, "", args...)
+}
+
+// QueryRowxContext using this statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row {
+ qs := &qStmt{s}
+ return qs.QueryRowxContext(ctx, "", args...)
+}
+
+// QueryxContext using this statement.
+// Any placeholder parameters are replaced with supplied args.
+func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) {
+ qs := &qStmt{s}
+ return qs.QueryxContext(ctx, "", args...)
+}
+
+func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) {
+ return q.Stmt.QueryContext(ctx, args...)
+}
+
+func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) {
+ r, err := q.Stmt.QueryContext(ctx, args...)
+ if err != nil {
+ return nil, err
+ }
+ return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err
+}
+
+func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row {
+ rows, err := q.Stmt.QueryContext(ctx, args...)
+ return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}
+}
+
+func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) {
+ return q.Stmt.ExecContext(ctx, args...)
+}
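sqlx_context.go rounds out the go1.8 surface with context-aware connect, query, prepare and transaction helpers on DB, Tx and Stmt. A short sketch of the two most common ones; driver, schema and queries are placeholders:

```go
package sqlxexample

import (
	"context"

	"github.com/jmoiron/sqlx"
)

type person struct {
	ID   int    `db:"id"`
	Name string `db:"name"`
}

// loadPeople uses the new helpers: SelectContext StructScans every row into
// the slice, GetContext scans exactly one row (sql.ErrNoRows when empty).
func loadPeople(ctx context.Context, db *sqlx.DB) ([]person, person, error) {
	var people []person
	if err := db.SelectContext(ctx, &people, `SELECT id, name FROM person ORDER BY id`); err != nil {
		return nil, person{}, err
	}

	var first person
	if err := db.GetContext(ctx, &first, `SELECT id, name FROM person LIMIT 1`); err != nil {
		return nil, person{}, err
	}
	return people, first, nil
}
```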
diff --git a/vendor/github.com/magiconair/properties/CHANGELOG.md b/vendor/github.com/magiconair/properties/CHANGELOG.md
index 89ee938..1d86d0c 100644
--- a/vendor/github.com/magiconair/properties/CHANGELOG.md
+++ b/vendor/github.com/magiconair/properties/CHANGELOG.md
@@ -2,7 +2,13 @@
### Unreleased
+ * [Issue #17](https://github.com/magiconair/properties/issues/17): Add [SetValue()](http://godoc.org/github.com/magiconair/properties#Properties.SetValue) method to set values generically
+ * [Issue #22](https://github.com/magiconair/properties/issues/22): Add [LoadMap()](http://godoc.org/github.com/magiconair/properties#LoadMap) function to load properties from a string map
+
+### [1.7.2](https://github.com/magiconair/properties/tags/v1.7.2) - 20 Mar 2017
+
* [Issue #15](https://github.com/magiconair/properties/issues/15): Drop gocheck dependency
+ * [PR #21](https://github.com/magiconair/properties/pull/21): Add [Map()](http://godoc.org/github.com/magiconair/properties#Properties.Map) and [FilterFunc()](http://godoc.org/github.com/magiconair/properties#Properties.FilterFunc)
### [1.7.1](https://github.com/magiconair/properties/tags/v1.7.1) - 13 Jan 2017
@@ -11,8 +17,8 @@
### [1.7.0](https://github.com/magiconair/properties/tags/v1.7.0) - 20 Mar 2016
- * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#Properties.LoadURL) method to load properties from a URL.
- * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#Properties.LoadString) method to load properties from an UTF8 string.
+ * [Issue #10](https://github.com/magiconair/properties/issues/10): Add [LoadURL,LoadURLs,MustLoadURL,MustLoadURLs](http://godoc.org/github.com/magiconair/properties#LoadURL) method to load properties from a URL.
+ * [Issue #11](https://github.com/magiconair/properties/issues/11): Add [LoadString,MustLoadString](http://godoc.org/github.com/magiconair/properties#LoadString) method to load properties from an UTF8 string.
* [PR #8](https://github.com/magiconair/properties/pull/8): Add [MustFlag](http://godoc.org/github.com/magiconair/properties#Properties.MustFlag) method to provide overrides via command line flags. (@pascaldekloe)
### [1.6.0](https://github.com/magiconair/properties/tags/v1.6.0) - 11 Dec 2015
diff --git a/vendor/github.com/magiconair/properties/README.md b/vendor/github.com/magiconair/properties/README.md
index 5985911..71b6a53 100644
--- a/vendor/github.com/magiconair/properties/README.md
+++ b/vendor/github.com/magiconair/properties/README.md
@@ -1,7 +1,7 @@
Overview [![Build Status](https://travis-ci.org/magiconair/properties.svg?branch=master)](https://travis-ci.org/magiconair/properties)
========
-#### Current version: 1.7.1
+#### Current version: 1.7.2
properties is a Go library for reading and writing properties files.
@@ -25,6 +25,8 @@ changed from `panic` to `log.Fatal` but this is configurable and custom
error handling functions can be provided. See the package documentation for
details.
+Read the full documentation on [GoDoc](https://godoc.org/github.com/magiconair/properties) [![GoDoc](https://godoc.org/github.com/magiconair/properties?status.png)](https://godoc.org/github.com/magiconair/properties)
+
Getting Started
---------------
@@ -35,13 +37,38 @@ import (
)
func main() {
+ // init from a file
p := properties.MustLoadFile("${HOME}/config.properties", properties.UTF8)
- // via getters
+ // or multiple files
+ p = properties.MustLoadFiles([]string{
+ "${HOME}/config.properties",
+ "${HOME}/config-${USER}.properties",
+ }, properties.UTF8, true)
+
+ // or from a map
+ p = properties.LoadMap(map[string]string{"key": "value", "abc": "def"})
+
+ // or from a string
+ p = properties.MustLoadString("key=value\nabc=def")
+
+ // or from a URL
+ p = properties.MustLoadURL("http://host/path")
+
+ // or from multiple URLs
+	p = properties.MustLoadURLs([]string{
+ "http://host/config",
+ "http://host/config-${USER}",
+ }, true)
+
+ // or from flags
+ p.MustFlag(flag.CommandLine)
+
+ // get values through getters
host := p.MustGetString("host")
port := p.GetInt("port", 8080)
- // or via decode
+ // or through Decode
type Config struct {
Host string `properties:"host"`
Port int `properties:"port,default=9000"`
@@ -52,18 +79,10 @@ func main() {
if err := p.Decode(&cfg); err != nil {
log.Fatal(err)
}
-
- // or via flags
- p.MustFlag(flag.CommandLine)
-
- // or via url
- p = properties.MustLoadURL("http://host/path")
}
```
-Read the full documentation on [GoDoc](https://godoc.org/github.com/magiconair/properties) [![GoDoc](https://godoc.org/github.com/magiconair/properties?status.png)](https://godoc.org/github.com/magiconair/properties)
-
Installation and Upgrade
------------------------
diff --git a/vendor/github.com/magiconair/properties/load.go b/vendor/github.com/magiconair/properties/load.go
index 701a86d..278cc2e 100644
--- a/vendor/github.com/magiconair/properties/load.go
+++ b/vendor/github.com/magiconair/properties/load.go
@@ -33,6 +33,15 @@ func LoadString(s string) (*Properties, error) {
return loadBuf([]byte(s), UTF8)
}
+// LoadMap creates a new Properties struct from a string map.
+func LoadMap(m map[string]string) *Properties {
+ p := NewProperties()
+ for k, v := range m {
+ p.Set(k, v)
+ }
+ return p
+}
+
// LoadFile reads a file into a Properties struct.
func LoadFile(filename string, enc Encoding) (*Properties, error) {
return loadAll([]string{filename}, enc, false)
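LoadMap, added above, builds a Properties value by calling Set on every entry of the supplied map; unlike the Load* readers it cannot fail, so it returns no error. A small usage sketch (key names are illustrative; the getters follow the README example earlier in this patch):

```go
package example

import (
	"fmt"

	"github.com/magiconair/properties"
)

func loadFromMap() {
	// LoadMap copies the map entries into a new Properties value via Set.
	p := properties.LoadMap(map[string]string{
		"host": "localhost",
		"port": "8080",
	})

	fmt.Println(p.MustGetString("host")) // "localhost"
	fmt.Println(p.GetInt("port", 9000))  // 8080
}
```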
diff --git a/vendor/github.com/magiconair/properties/properties.go b/vendor/github.com/magiconair/properties/properties.go
index 80360c9..4f3d5a4 100644
--- a/vendor/github.com/magiconair/properties/properties.go
+++ b/vendor/github.com/magiconair/properties/properties.go
@@ -542,6 +542,13 @@ func (p *Properties) Set(key, value string) (prev string, ok bool, err error) {
return prev, ok, nil
}
+// SetValue sets property key to the default string value
+// as defined by fmt.Sprintf("%v").
+func (p *Properties) SetValue(key string, value interface{}) error {
+ _, _, err := p.Set(key, fmt.Sprintf("%v", value))
+ return err
+}
+
// MustSet sets the property key to the corresponding value.
// If a value for key existed before then ok is true and prev
// contains the previous value. An empty key is silently ignored.
@@ -622,6 +629,30 @@ func (p *Properties) WriteComment(w io.Writer, prefix string, enc Encoding) (n i
return
}
+// Map returns a copy of the properties as a map.
+func (p *Properties) Map() map[string]string {
+ m := make(map[string]string)
+ for k, v := range p.m {
+ m[k] = v
+ }
+ return m
+}
+
+// FilterFunc returns a copy of the properties which includes the values which passed all filters.
+func (p *Properties) FilterFunc(filters ...func(k, v string) bool) *Properties {
+ pp := NewProperties()
+outer:
+ for k, v := range p.m {
+ for _, f := range filters {
+ if !f(k, v) {
+ continue outer
+ }
+		}
+		pp.Set(k, v)
+	}
+ return pp
+}
+
// ----------------------------------------------------------------------------
// Delete removes the key and its comments.
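The three additions above round out the Properties API: SetValue stores any value after formatting it with fmt.Sprintf("%v"), Map copies the key/value pairs into a plain map, and FilterFunc builds a new Properties from the entries that pass the supplied filter functions. A brief sketch combining them (key names are illustrative only):

```go
package example

import (
	"fmt"
	"strings"

	"github.com/magiconair/properties"
)

func filterAndCopy() {
	p := properties.NewProperties()

	// SetValue runs the value through fmt.Sprintf("%v") before storing it.
	_ = p.SetValue("retries", 3)
	_ = p.SetValue("debug.sql", true)

	// Map returns an independent copy of the current key/value pairs.
	for k, v := range p.Map() {
		fmt.Printf("%s=%s\n", k, v)
	}

	// FilterFunc keeps the entries accepted by the given filter.
	debugOnly := p.FilterFunc(func(k, v string) bool {
		return strings.HasPrefix(k, "debug.")
	})
	fmt.Println(debugOnly.Map())
}
```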
diff --git a/vendor/github.com/mattn/go-sqlite3/README.md b/vendor/github.com/mattn/go-sqlite3/README.md
index 825d3aa..8369013 100644
--- a/vendor/github.com/mattn/go-sqlite3/README.md
+++ b/vendor/github.com/mattn/go-sqlite3/README.md
@@ -1,9 +1,10 @@
go-sqlite3
==========
+[![GoDoc Reference](https://godoc.org/github.com/mattn/go-sqlite3?status.svg)](http://godoc.org/github.com/mattn/go-sqlite3)
[![Build Status](https://travis-ci.org/mattn/go-sqlite3.svg?branch=master)](https://travis-ci.org/mattn/go-sqlite3)
[![Coverage Status](https://coveralls.io/repos/mattn/go-sqlite3/badge.svg?branch=master)](https://coveralls.io/r/mattn/go-sqlite3?branch=master)
-[![GoDoc](https://godoc.org/github.com/mattn/go-sqlite3?status.svg)](http://godoc.org/github.com/mattn/go-sqlite3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-sqlite3)](https://goreportcard.com/report/github.com/mattn/go-sqlite3)
Description
-----------
@@ -45,6 +46,8 @@ FAQ
Use `go build --tags "icu"`
+ Available extensions: `json1`, `fts5`, `icu`
+
* Can't build go-sqlite3 on windows 64bit.
> Probably, you are using go 1.0, go1.0 has a problem when it comes to compiling/linking on windows 64bit.
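The new FAQ line lists the compile-time extensions (`json1`, `fts5`, `icu`) that can be enabled with build tags. A minimal sketch of a consumer build, assuming the usual blank-import registration of the driver; only the embedded SQLite gains the features, the Go-side database/sql usage is unchanged:

```go
// Build with, e.g.: go build -tags "json1 fts5 icu"
package example

import (
	"database/sql"

	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver
)

func openDB() (*sql.DB, error) {
	// json1/fts5/icu live inside the embedded SQLite library; nothing in the
	// Go API changes when they are compiled in.
	return sql.Open("sqlite3", "app.db")
}
```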
diff --git a/vendor/github.com/mattn/go-sqlite3/error.go b/vendor/github.com/mattn/go-sqlite3/error.go
index 1f14aba..49ab890 100644
--- a/vendor/github.com/mattn/go-sqlite3/error.go
+++ b/vendor/github.com/mattn/go-sqlite3/error.go
@@ -71,7 +71,6 @@ func (err ErrNoExtended) Error() string {
return Error{Code: ErrNo(C.int(err) & ErrNoMask), ExtendedCode: err}.Error()
}
-// Error return error message.
func (err Error) Error() string {
if err.err != "" {
return err.err
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
index 7a852e6..825e7d8 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.c
@@ -1,7 +1,8 @@
#ifndef USE_LIBSQLITE3
+#define SQLITE_DISABLE_INTRINSIC 1
/******************************************************************************
** This file is an amalgamation of many separate C source files from SQLite
-** version 3.15.1. By combining all the individual C code files into this
+** version 3.17.0. By combining all the individual C code files into this
** single large file, the entire code can be compiled as a single translation
** unit. This allows many compilers to do optimizations that would not be
** possible if the files were compiled separately. Performance improvements
@@ -205,12 +206,28 @@
# define _LARGEFILE_SOURCE 1
#endif
-/* What version of GCC is being used. 0 means GCC is not being used */
-#ifdef __GNUC__
+/* The GCC_VERSION, CLANG_VERSION, and MSVC_VERSION macros are used to
+** conditionally include optimizations for each of these compilers. A
+** value of 0 means that compiler is not being used. The
+** SQLITE_DISABLE_INTRINSIC macro means do not use any compiler-specific
+** optimizations, and hence set all compiler macros to 0
+*/
+#if defined(__GNUC__) && !defined(SQLITE_DISABLE_INTRINSIC)
# define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__)
#else
# define GCC_VERSION 0
#endif
+#if defined(__clang__) && !defined(_WIN32) && !defined(SQLITE_DISABLE_INTRINSIC)
+# define CLANG_VERSION \
+ (__clang_major__*1000000+__clang_minor__*1000+__clang_patchlevel__)
+#else
+# define CLANG_VERSION 0
+#endif
+#if defined(_MSC_VER) && !defined(SQLITE_DISABLE_INTRINSIC)
+# define MSVC_VERSION _MSC_VER
+#else
+# define MSVC_VERSION 0
+#endif
/* Needed for various definitions... */
#if defined(__GNUC__) && !defined(_GNU_SOURCE)
@@ -382,13 +399,13 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.15.1"
-#define SQLITE_VERSION_NUMBER 3015001
-#define SQLITE_SOURCE_ID "2016-11-04 12:08:49 1136863c76576110e710dd5d69ab6bf347c65e36"
+#define SQLITE_VERSION "3.17.0"
+#define SQLITE_VERSION_NUMBER 3017000
+#define SQLITE_SOURCE_ID "2017-02-13 16:02:40 ada05cfa86ad7f5645450ac7a2a21c9aa6e57d2c"
/*
** CAPI3REF: Run-Time Library Version Numbers
-** KEYWORDS: sqlite3_version, sqlite3_sourceid
+** KEYWORDS: sqlite3_version sqlite3_sourceid
**
** These interfaces provide the same information as the [SQLITE_VERSION],
** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros
@@ -520,7 +537,11 @@ typedef struct sqlite3 sqlite3;
*/
#ifdef SQLITE_INT64_TYPE
typedef SQLITE_INT64_TYPE sqlite_int64;
- typedef unsigned SQLITE_INT64_TYPE sqlite_uint64;
+# ifdef SQLITE_UINT64_TYPE
+ typedef SQLITE_UINT64_TYPE sqlite_uint64;
+# else
+ typedef unsigned SQLITE_INT64_TYPE sqlite_uint64;
+# endif
#elif defined(_MSC_VER) || defined(__BORLANDC__)
typedef __int64 sqlite_int64;
typedef unsigned __int64 sqlite_uint64;
@@ -833,7 +854,7 @@ SQLITE_API int sqlite3_exec(
** file that were written at the application level might have changed
** and that adjacent bytes, even bytes within the same sector are
** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN
-** flag indicate that a file cannot be deleted when open. The
+** flag indicates that a file cannot be deleted when open. The
** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on
** read-only media and cannot be changed even by processes with
** elevated privileges.
@@ -983,6 +1004,9 @@ struct sqlite3_file {
** <li> [SQLITE_IOCAP_ATOMIC64K]
** <li> [SQLITE_IOCAP_SAFE_APPEND]
** <li> [SQLITE_IOCAP_SEQUENTIAL]
+** <li> [SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN]
+** <li> [SQLITE_IOCAP_POWERSAFE_OVERWRITE]
+** <li> [SQLITE_IOCAP_IMMUTABLE]
** </ul>
**
** The SQLITE_IOCAP_ATOMIC property means that all writes of
@@ -1296,6 +1320,7 @@ struct sqlite3_io_methods {
#define SQLITE_FCNTL_VFS_POINTER 27
#define SQLITE_FCNTL_JOURNAL_POINTER 28
#define SQLITE_FCNTL_WIN32_GET_HANDLE 29
+#define SQLITE_FCNTL_PDB 30
/* deprecated names */
#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
@@ -2248,6 +2273,18 @@ struct sqlite3_mem_methods {
** until after the database connection closes.
** </dd>
**
+** <dt>SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE</dt>
+** <dd> Usually, when a database in wal mode is closed or detached from a
+** database handle, SQLite checks if this will mean that there are now no
+** connections at all to the database. If so, it performs a checkpoint
+** operation before closing the connection. This option may be used to
+** override this behaviour. The first parameter passed to this operation
+** is an integer - non-zero to disable checkpoints-on-close, or zero (the
+** default) to enable them. The second parameter is a pointer to an integer
+** into which is written 0 or 1 to indicate whether checkpoints-on-close
+** have been disabled - 0 if they are not disabled, 1 if they are.
+** </dd>
+**
** </dl>
*/
#define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */
@@ -2256,6 +2293,7 @@ struct sqlite3_mem_methods {
#define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */
#define SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER 1004 /* int int* */
#define SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION 1005 /* int int* */
+#define SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE 1006 /* int int* */
/*
@@ -3857,6 +3895,10 @@ SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt);
** sqlite3_stmt_readonly() to return true since, while those statements
** change the configuration of a database connection, they do not make
** changes to the content of the database files on disk.
+** ^The sqlite3_stmt_readonly() interface returns true for [BEGIN] since
+** [BEGIN] merely sets internal flags, but the [BEGIN|BEGIN IMMEDIATE] and
+** [BEGIN|BEGIN EXCLUSIVE] commands do touch the database and so
+** sqlite3_stmt_readonly() returns false for those commands.
*/
SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
@@ -4139,8 +4181,12 @@ SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*);
** METHOD: sqlite3_stmt
**
** ^Return the number of columns in the result set returned by the
-** [prepared statement]. ^This routine returns 0 if pStmt is an SQL
-** statement that does not return data (for example an [UPDATE]).
+** [prepared statement]. ^If this routine returns 0, that means the
+** [prepared statement] returns no data (for example an [UPDATE]).
+** ^However, just because this routine returns a positive number does not
+** mean that one or more rows of data will be returned. ^A SELECT statement
+** will always have a positive sqlite3_column_count() but depending on the
+** WHERE clause constraints and the table content, it might return no rows.
**
** See also: [sqlite3_data_count()]
*/
@@ -5649,7 +5695,7 @@ SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
** ^The update hook is not invoked when [WITHOUT ROWID] tables are modified.
**
** ^In the current implementation, the update hook
-** is not invoked when duplication rows are deleted because of an
+** is not invoked when conflicting rows are deleted because of an
** [ON CONFLICT | ON CONFLICT REPLACE] clause. ^Nor is the update hook
** invoked when rows are deleted using the [truncate optimization].
** The exceptions defined in this paragraph might change in a future
@@ -6431,6 +6477,12 @@ typedef struct sqlite3_blob sqlite3_blob;
** [database connection] error code and message accessible via
** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions.
**
+** A BLOB referenced by sqlite3_blob_open() may be read using the
+** [sqlite3_blob_read()] interface and modified by using
+** [sqlite3_blob_write()]. The [BLOB handle] can be moved to a
+** different row of the same table using the [sqlite3_blob_reopen()]
+** interface. However, the column, table, or database of a [BLOB handle]
+** cannot be changed after the [BLOB handle] is opened.
**
** ^(If the row that a BLOB handle points to is modified by an
** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects
@@ -6454,6 +6506,10 @@ typedef struct sqlite3_blob sqlite3_blob;
**
** To avoid a resource leak, every open [BLOB handle] should eventually
** be released by a call to [sqlite3_blob_close()].
+**
+** See also: [sqlite3_blob_close()],
+** [sqlite3_blob_reopen()], [sqlite3_blob_read()],
+** [sqlite3_blob_bytes()], [sqlite3_blob_write()].
*/
SQLITE_API int sqlite3_blob_open(
sqlite3*,
@@ -6469,11 +6525,11 @@ SQLITE_API int sqlite3_blob_open(
** CAPI3REF: Move a BLOB Handle to a New Row
** METHOD: sqlite3_blob
**
-** ^This function is used to move an existing blob handle so that it points
+** ^This function is used to move an existing [BLOB handle] so that it points
** to a different row of the same database table. ^The new row is identified
** by the rowid value passed as the second argument. Only the row can be
** changed. ^The database, table and column on which the blob handle is open
-** remain the same. Moving an existing blob handle to a new row can be
+** remain the same. Moving an existing [BLOB handle] to a new row is
** faster than closing the existing handle and opening a new one.
**
** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] -
@@ -8402,7 +8458,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
**
** ^The [sqlite3_preupdate_hook()] interface registers a callback function
** that is invoked prior to each [INSERT], [UPDATE], and [DELETE] operation
-** on a [rowid table].
+** on a database table.
** ^At most one preupdate hook may be registered at a time on a single
** [database connection]; each call to [sqlite3_preupdate_hook()] overrides
** the previous setting.
@@ -8411,9 +8467,9 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
** ^The third parameter to [sqlite3_preupdate_hook()] is passed through as
** the first parameter to callbacks.
**
-** ^The preupdate hook only fires for changes to [rowid tables]; the preupdate
-** hook is not invoked for changes to [virtual tables] or [WITHOUT ROWID]
-** tables.
+** ^The preupdate hook only fires for changes to real database tables; the
+** preupdate hook is not invoked for changes to [virtual tables] or to
+** system tables like sqlite_master or sqlite_stat1.
**
** ^The second parameter to the preupdate callback is a pointer to
** the [database connection] that registered the preupdate hook.
@@ -8427,12 +8483,16 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
** databases.)^
** ^The fifth parameter to the preupdate callback is the name of the
** table that is being modified.
-** ^The sixth parameter to the preupdate callback is the initial [rowid] of the
-** row being changes for SQLITE_UPDATE and SQLITE_DELETE changes and is
-** undefined for SQLITE_INSERT changes.
-** ^The seventh parameter to the preupdate callback is the final [rowid] of
-** the row being changed for SQLITE_UPDATE and SQLITE_INSERT changes and is
-** undefined for SQLITE_DELETE changes.
+**
+** For an UPDATE or DELETE operation on a [rowid table], the sixth
+** parameter passed to the preupdate callback is the initial [rowid] of the
+** row being modified or deleted. For an INSERT operation on a rowid table,
+** or any operation on a WITHOUT ROWID table, the value of the sixth
+** parameter is undefined. For an INSERT or UPDATE on a rowid table the
+** seventh parameter is the final rowid value of the row being inserted
+** or updated. The value of the seventh parameter passed to the callback
+** function is not defined for operations on WITHOUT ROWID tables, or for
+** INSERT operations on rowid tables.
**
** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()],
** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces
@@ -8472,7 +8532,8 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
**
** See also: [sqlite3_update_hook()]
*/
-SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_preupdate_hook(
+#if defined(SQLITE_ENABLE_PREUPDATE_HOOK)
+SQLITE_API void *sqlite3_preupdate_hook(
sqlite3 *db,
void(*xPreUpdate)(
void *pCtx, /* Copy of third arg to preupdate_hook() */
@@ -8485,10 +8546,11 @@ SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_preupdate_hook(
),
void*
);
-SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **);
-SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_count(sqlite3 *);
-SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_depth(sqlite3 *);
-SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **);
+SQLITE_API int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **);
+SQLITE_API int sqlite3_preupdate_count(sqlite3 *);
+SQLITE_API int sqlite3_preupdate_depth(sqlite3 *);
+SQLITE_API int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **);
+#endif
/*
** CAPI3REF: Low-level system error code
@@ -8504,7 +8566,7 @@ SQLITE_API int sqlite3_system_errno(sqlite3*);
/*
** CAPI3REF: Database Snapshot
-** KEYWORDS: {snapshot}
+** KEYWORDS: {snapshot} {sqlite3_snapshot}
** EXPERIMENTAL
**
** An instance of the snapshot object records the state of a [WAL mode]
@@ -8528,7 +8590,9 @@ SQLITE_API int sqlite3_system_errno(sqlite3*);
** to an historical snapshot (if possible). The destructor for
** sqlite3_snapshot objects is [sqlite3_snapshot_free()].
*/
-typedef struct sqlite3_snapshot sqlite3_snapshot;
+typedef struct sqlite3_snapshot {
+ unsigned char hidden[48];
+} sqlite3_snapshot;
/*
** CAPI3REF: Record A Database Snapshot
@@ -8539,9 +8603,32 @@ typedef struct sqlite3_snapshot sqlite3_snapshot;
** schema S in database connection D. ^On success, the
** [sqlite3_snapshot_get(D,S,P)] interface writes a pointer to the newly
** created [sqlite3_snapshot] object into *P and returns SQLITE_OK.
-** ^If schema S of [database connection] D is not a [WAL mode] database
-** that is in a read transaction, then [sqlite3_snapshot_get(D,S,P)]
-** leaves the *P value unchanged and returns an appropriate [error code].
+** If there is not already a read-transaction open on schema S when
+** this function is called, one is opened automatically.
+**
+** The following must be true for this function to succeed. If any of
+** the following statements are false when sqlite3_snapshot_get() is
+** called, SQLITE_ERROR is returned. The final value of *P is undefined
+** in this case.
+**
+** <ul>
+** <li> The database handle must be in [autocommit mode].
+**
+** <li> Schema S of [database connection] D must be a [WAL mode] database.
+**
+** <li> There must not be a write transaction open on schema S of database
+** connection D.
+**
+** <li> One or more transactions must have been written to the current wal
+** file since it was created on disk (by any connection). This means
+** that a snapshot cannot be taken on a wal mode database with no wal
+** file immediately after it is first opened. At least one transaction
+** must be written to it first.
+** </ul>
+**
+** This function may also return SQLITE_NOMEM. If it is called with the
+** database handle in autocommit mode but fails for some other reason,
+** whether or not a read transaction is opened on schema S is undefined.
**
** The [sqlite3_snapshot] object returned from a successful call to
** [sqlite3_snapshot_get()] must be freed using [sqlite3_snapshot_free()]
@@ -8635,6 +8722,28 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp(
);
/*
+** CAPI3REF: Recover snapshots from a wal file
+** EXPERIMENTAL
+**
+** If all connections disconnect from a database file but do not perform
+** a checkpoint, the existing wal file is opened along with the database
+** file the next time the database is opened. At this point it is only
+** possible to successfully call sqlite3_snapshot_open() to open the most
+** recent snapshot of the database (the one at the head of the wal file),
+** even though the wal file may contain other valid snapshots for which
+** clients have sqlite3_snapshot handles.
+**
+** This function attempts to scan the wal file associated with database zDb
+** of database handle db and make all valid snapshots available to
+** sqlite3_snapshot_open(). It is an error if there is already a read
+** transaction open on the database, or if the database is not a wal mode
+** database.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+*/
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb);
+
+/*
** Undo the hack that converts floating point types to integer for
** builds on processors without floating point support.
*/
@@ -8819,7 +8928,7 @@ typedef struct sqlite3_changeset_iter sqlite3_changeset_iter;
** attached database. It is not an error if database zDb is not attached
** to the database when the session object is created.
*/
-int sqlite3session_create(
+SQLITE_API int sqlite3session_create(
sqlite3 *db, /* Database handle */
const char *zDb, /* Name of db (e.g. "main") */
sqlite3_session **ppSession /* OUT: New session object */
@@ -8837,7 +8946,7 @@ int sqlite3session_create(
** are attached is closed. Refer to the documentation for
** [sqlite3session_create()] for details.
*/
-void sqlite3session_delete(sqlite3_session *pSession);
+SQLITE_API void sqlite3session_delete(sqlite3_session *pSession);
/*
@@ -8857,7 +8966,7 @@ void sqlite3session_delete(sqlite3_session *pSession);
** The return value indicates the final state of the session object: 0 if
** the session is disabled, or 1 if it is enabled.
*/
-int sqlite3session_enable(sqlite3_session *pSession, int bEnable);
+SQLITE_API int sqlite3session_enable(sqlite3_session *pSession, int bEnable);
/*
** CAPI3REF: Set Or Clear the Indirect Change Flag
@@ -8886,7 +8995,7 @@ int sqlite3session_enable(sqlite3_session *pSession, int bEnable);
** The return value indicates the final state of the indirect flag: 0 if
** it is clear, or 1 if it is set.
*/
-int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect);
+SQLITE_API int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect);
/*
** CAPI3REF: Attach A Table To A Session Object
@@ -8916,7 +9025,7 @@ int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect);
** SQLITE_OK is returned if the call completes without error. Or, if an error
** occurs, an SQLite error code (e.g. SQLITE_NOMEM) is returned.
*/
-int sqlite3session_attach(
+SQLITE_API int sqlite3session_attach(
sqlite3_session *pSession, /* Session object */
const char *zTab /* Table name */
);
@@ -8930,7 +9039,7 @@ int sqlite3session_attach(
** If xFilter returns 0, changes is not tracked. Note that once a table is
** attached, xFilter will not be called again.
*/
-void sqlite3session_table_filter(
+SQLITE_API void sqlite3session_table_filter(
sqlite3_session *pSession, /* Session object */
int(*xFilter)(
void *pCtx, /* Copy of third arg to _filter_table() */
@@ -9043,7 +9152,7 @@ void sqlite3session_table_filter(
** another field of the same row is updated while the session is enabled, the
** resulting changeset will contain an UPDATE change that updates both fields.
*/
-int sqlite3session_changeset(
+SQLITE_API int sqlite3session_changeset(
sqlite3_session *pSession, /* Session object */
int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */
void **ppChangeset /* OUT: Buffer containing changeset */
@@ -9087,7 +9196,8 @@ int sqlite3session_changeset(
** the from-table, a DELETE record is added to the session object.
**
** <li> For each row (primary key) that exists in both tables, but features
-** different in each, an UPDATE record is added to the session.
+** different non-PK values in each, an UPDATE record is added to the
+** session.
** </ul>
**
** To clarify, if this function is called and then a changeset constructed
@@ -9104,7 +9214,7 @@ int sqlite3session_changeset(
** message. It is the responsibility of the caller to free this buffer using
** sqlite3_free().
*/
-int sqlite3session_diff(
+SQLITE_API int sqlite3session_diff(
sqlite3_session *pSession,
const char *zFromDb,
const char *zTbl,
@@ -9140,7 +9250,7 @@ int sqlite3session_diff(
** a single table are grouped together, tables appear in the order in which
** they were attached to the session object).
*/
-int sqlite3session_patchset(
+SQLITE_API int sqlite3session_patchset(
sqlite3_session *pSession, /* Session object */
int *pnPatchset, /* OUT: Size of buffer at *ppChangeset */
void **ppPatchset /* OUT: Buffer containing changeset */
@@ -9161,7 +9271,7 @@ int sqlite3session_patchset(
** guaranteed that a call to sqlite3session_changeset() will return a
** changeset containing zero changes.
*/
-int sqlite3session_isempty(sqlite3_session *pSession);
+SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession);
/*
** CAPI3REF: Create An Iterator To Traverse A Changeset
@@ -9196,7 +9306,7 @@ int sqlite3session_isempty(sqlite3_session *pSession);
** the applies to table X, then one for table Y, and then later on visit
** another change for table X.
*/
-int sqlite3changeset_start(
+SQLITE_API int sqlite3changeset_start(
sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */
int nChangeset, /* Size of changeset blob in bytes */
void *pChangeset /* Pointer to blob containing changeset */
@@ -9225,7 +9335,7 @@ int sqlite3changeset_start(
** codes include SQLITE_CORRUPT (if the changeset buffer is corrupt) or
** SQLITE_NOMEM.
*/
-int sqlite3changeset_next(sqlite3_changeset_iter *pIter);
+SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter);
/*
** CAPI3REF: Obtain The Current Operation From A Changeset Iterator
@@ -9253,7 +9363,7 @@ int sqlite3changeset_next(sqlite3_changeset_iter *pIter);
** SQLite error code is returned. The values of the output variables may not
** be trusted in this case.
*/
-int sqlite3changeset_op(
+SQLITE_API int sqlite3changeset_op(
sqlite3_changeset_iter *pIter, /* Iterator object */
const char **pzTab, /* OUT: Pointer to table name */
int *pnCol, /* OUT: Number of columns in table */
@@ -9286,7 +9396,7 @@ int sqlite3changeset_op(
** SQLITE_OK is returned and the output variables populated as described
** above.
*/
-int sqlite3changeset_pk(
+SQLITE_API int sqlite3changeset_pk(
sqlite3_changeset_iter *pIter, /* Iterator object */
unsigned char **pabPK, /* OUT: Array of boolean - true for PK cols */
int *pnCol /* OUT: Number of entries in output array */
@@ -9316,7 +9426,7 @@ int sqlite3changeset_pk(
** If some other error occurs (e.g. an OOM condition), an SQLite error code
** is returned and *ppValue is set to NULL.
*/
-int sqlite3changeset_old(
+SQLITE_API int sqlite3changeset_old(
sqlite3_changeset_iter *pIter, /* Changeset iterator */
int iVal, /* Column number */
sqlite3_value **ppValue /* OUT: Old value (or NULL pointer) */
@@ -9349,7 +9459,7 @@ int sqlite3changeset_old(
** If some other error occurs (e.g. an OOM condition), an SQLite error code
** is returned and *ppValue is set to NULL.
*/
-int sqlite3changeset_new(
+SQLITE_API int sqlite3changeset_new(
sqlite3_changeset_iter *pIter, /* Changeset iterator */
int iVal, /* Column number */
sqlite3_value **ppValue /* OUT: New value (or NULL pointer) */
@@ -9376,7 +9486,7 @@ int sqlite3changeset_new(
** If some other error occurs (e.g. an OOM condition), an SQLite error code
** is returned and *ppValue is set to NULL.
*/
-int sqlite3changeset_conflict(
+SQLITE_API int sqlite3changeset_conflict(
sqlite3_changeset_iter *pIter, /* Changeset iterator */
int iVal, /* Column number */
sqlite3_value **ppValue /* OUT: Value from conflicting row */
@@ -9392,7 +9502,7 @@ int sqlite3changeset_conflict(
**
** In all other cases this function returns SQLITE_MISUSE.
*/
-int sqlite3changeset_fk_conflicts(
+SQLITE_API int sqlite3changeset_fk_conflicts(
sqlite3_changeset_iter *pIter, /* Changeset iterator */
int *pnOut /* OUT: Number of FK violations */
);
@@ -9425,7 +9535,7 @@ int sqlite3changeset_fk_conflicts(
** // An error has occurred
** }
*/
-int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter);
+SQLITE_API int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter);
/*
** CAPI3REF: Invert A Changeset
@@ -9455,7 +9565,7 @@ int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter);
** WARNING/TODO: This function currently assumes that the input is a valid
** changeset. If it is not, the results are undefined.
*/
-int sqlite3changeset_invert(
+SQLITE_API int sqlite3changeset_invert(
int nIn, const void *pIn, /* Input changeset */
int *pnOut, void **ppOut /* OUT: Inverse of input */
);
@@ -9484,7 +9594,7 @@ int sqlite3changeset_invert(
**
** Refer to the sqlite3_changegroup documentation below for details.
*/
-int sqlite3changeset_concat(
+SQLITE_API int sqlite3changeset_concat(
int nA, /* Number of bytes in buffer pA */
void *pA, /* Pointer to buffer containing changeset A */
int nB, /* Number of bytes in buffer pB */
@@ -9672,7 +9782,7 @@ void sqlite3changegroup_delete(sqlite3_changegroup*);
** <ul>
** <li> The table has the same name as the name recorded in the
** changeset, and
-** <li> The table has the same number of columns as recorded in the
+** <li> The table has at least as many columns as recorded in the
** changeset, and
** <li> The table has primary key columns in the same position as
** recorded in the changeset.
@@ -9717,7 +9827,11 @@ void sqlite3changegroup_delete(sqlite3_changegroup*);
** If a row with matching primary key values is found, but one or more of
** the non-primary key fields contains a value different from the original
** row value stored in the changeset, the conflict-handler function is
-** invoked with [SQLITE_CHANGESET_DATA] as the second argument.
+** invoked with [SQLITE_CHANGESET_DATA] as the second argument. If the
+** database table has more columns than are recorded in the changeset,
+** only the values of those non-primary key fields are compared against
+** the current database contents - any trailing database table columns
+** are ignored.
**
** If no row with matching primary key values is found in the database,
** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND]
@@ -9732,7 +9846,9 @@ void sqlite3changegroup_delete(sqlite3_changegroup*);
**
** <dt>INSERT Changes<dd>
** For each INSERT change, an attempt is made to insert the new row into
-** the database.
+** the database. If the changeset row contains fewer fields than the
+** database table, the trailing fields are populated with their default
+** values.
**
** If the attempt to insert the row fails because the database already
** contains a row with the same primary key values, the conflict handler
@@ -9750,13 +9866,13 @@ void sqlite3changegroup_delete(sqlite3_changegroup*);
** For each UPDATE change, this function checks if the target database
** contains a row with the same primary key value (or values) as the
** original row values stored in the changeset. If it does, and the values
-** stored in all non-primary key columns also match the values stored in
-** the changeset the row is updated within the target database.
+** stored in all modified non-primary key columns also match the values
+** stored in the changeset the row is updated within the target database.
**
** If a row with matching primary key values is found, but one or more of
-** the non-primary key fields contains a value different from an original
-** row value stored in the changeset, the conflict-handler function is
-** invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since
+** the modified non-primary key fields contains a value different from an
+** original row value stored in the changeset, the conflict-handler function
+** is invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since
** UPDATE changes only contain values for non-primary key fields that are
** to be modified, only those fields need to match the original values to
** avoid the SQLITE_CHANGESET_DATA conflict-handler callback.
@@ -9784,7 +9900,7 @@ void sqlite3changegroup_delete(sqlite3_changegroup*);
** rolled back, restoring the target database to its original state, and an
** SQLite error code returned.
*/
-int sqlite3changeset_apply(
+SQLITE_API int sqlite3changeset_apply(
sqlite3 *db, /* Apply change to "main" db of this handle */
int nChangeset, /* Size of changeset in bytes */
void *pChangeset, /* Changeset blob */
@@ -9985,7 +10101,7 @@ int sqlite3changeset_apply(
** parameter set to a value less than or equal to zero. Other than this,
** no guarantees are made as to the size of the chunks of data returned.
*/
-int sqlite3changeset_apply_strm(
+SQLITE_API int sqlite3changeset_apply_strm(
sqlite3 *db, /* Apply change to "main" db of this handle */
int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */
void *pIn, /* First arg for xInput */
@@ -10000,7 +10116,7 @@ int sqlite3changeset_apply_strm(
),
void *pCtx /* First argument passed to xConflict */
);
-int sqlite3changeset_concat_strm(
+SQLITE_API int sqlite3changeset_concat_strm(
int (*xInputA)(void *pIn, void *pData, int *pnData),
void *pInA,
int (*xInputB)(void *pIn, void *pData, int *pnData),
@@ -10008,23 +10124,23 @@ int sqlite3changeset_concat_strm(
int (*xOutput)(void *pOut, const void *pData, int nData),
void *pOut
);
-int sqlite3changeset_invert_strm(
+SQLITE_API int sqlite3changeset_invert_strm(
int (*xInput)(void *pIn, void *pData, int *pnData),
void *pIn,
int (*xOutput)(void *pOut, const void *pData, int nData),
void *pOut
);
-int sqlite3changeset_start_strm(
+SQLITE_API int sqlite3changeset_start_strm(
sqlite3_changeset_iter **pp,
int (*xInput)(void *pIn, void *pData, int *pnData),
void *pIn
);
-int sqlite3session_changeset_strm(
+SQLITE_API int sqlite3session_changeset_strm(
sqlite3_session *pSession,
int (*xOutput)(void *pOut, const void *pData, int nData),
void *pOut
);
-int sqlite3session_patchset_strm(
+SQLITE_API int sqlite3session_patchset_strm(
sqlite3_session *pSession,
int (*xOutput)(void *pOut, const void *pData, int nData),
void *pOut
@@ -10931,6 +11047,7 @@ struct fts5_api {
# include <intrin.h>
# pragma intrinsic(_byteswap_ushort)
# pragma intrinsic(_byteswap_ulong)
+# pragma intrinsic(_byteswap_uint64)
# pragma intrinsic(_ReadWriteBarrier)
# else
# include <cmnintrin.h>
@@ -11470,6 +11587,18 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*);
#include <stddef.h>
/*
+** Use a macro to replace memcpy() if compiled with SQLITE_INLINE_MEMCPY.
+** This allows better measurements of where memcpy() is used when running
+** cachegrind. But this macro version of memcpy() is very slow so it
+** should not be used in production. This is a performance measurement
+** hack only.
+*/
+#ifdef SQLITE_INLINE_MEMCPY
+# define memcpy(D,S,N) {char*xxd=(char*)(D);const char*xxs=(const char*)(S);\
+ int xxn=(N);while(xxn-->0)*(xxd++)=*(xxs++);}
+#endif
+
+/*
** If compiling for a processor that lacks floating point support,
** substitute integer for floating-point
*/
@@ -11553,9 +11682,12 @@ SQLITE_PRIVATE void sqlite3HashClear(Hash*);
** pagecaches for each database connection. A positive number is the
** number of pages. A negative number N translations means that a buffer
** of -1024*N bytes is allocated and used for as many pages as it will hold.
+**
+** The default value of "20" was chosen to minimize the run-time of the
+** speedtest1 test program with options: --shrink-memory --reprepare
*/
#ifndef SQLITE_DEFAULT_PCACHE_INITSZ
-# define SQLITE_DEFAULT_PCACHE_INITSZ 100
+# define SQLITE_DEFAULT_PCACHE_INITSZ 20
#endif
/*
@@ -11730,32 +11862,35 @@ typedef INT16_TYPE LogEst;
**
** For best performance, an attempt is made to guess at the byte-order
** using C-preprocessor macros. If that is unsuccessful, or if
-** -DSQLITE_RUNTIME_BYTEORDER=1 is set, then byte-order is determined
+** -DSQLITE_BYTEORDER=0 is set, then byte-order is determined
** at run-time.
*/
-#if (defined(i386) || defined(__i386__) || defined(_M_IX86) || \
+#ifndef SQLITE_BYTEORDER
+# if defined(i386) || defined(__i386__) || defined(_M_IX86) || \
defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \
- defined(__arm__)) && !defined(SQLITE_RUNTIME_BYTEORDER)
-# define SQLITE_BYTEORDER 1234
-# define SQLITE_BIGENDIAN 0
-# define SQLITE_LITTLEENDIAN 1
-# define SQLITE_UTF16NATIVE SQLITE_UTF16LE
+ defined(__arm__)
+# define SQLITE_BYTEORDER 1234
+# elif defined(sparc) || defined(__ppc__)
+# define SQLITE_BYTEORDER 4321
+# else
+# define SQLITE_BYTEORDER 0
+# endif
#endif
-#if (defined(sparc) || defined(__ppc__)) \
- && !defined(SQLITE_RUNTIME_BYTEORDER)
-# define SQLITE_BYTEORDER 4321
+#if SQLITE_BYTEORDER==4321
# define SQLITE_BIGENDIAN 1
# define SQLITE_LITTLEENDIAN 0
# define SQLITE_UTF16NATIVE SQLITE_UTF16BE
-#endif
-#if !defined(SQLITE_BYTEORDER)
+#elif SQLITE_BYTEORDER==1234
+# define SQLITE_BIGENDIAN 0
+# define SQLITE_LITTLEENDIAN 1
+# define SQLITE_UTF16NATIVE SQLITE_UTF16LE
+#else
# ifdef SQLITE_AMALGAMATION
const int sqlite3one = 1;
# else
extern const int sqlite3one;
# endif
-# define SQLITE_BYTEORDER 0 /* 0 means "unknown at compile-time" */
# define SQLITE_BIGENDIAN (*(char *)(&sqlite3one)==0)
# define SQLITE_LITTLEENDIAN (*(char *)(&sqlite3one)==1)
# define SQLITE_UTF16NATIVE (SQLITE_BIGENDIAN?SQLITE_UTF16BE:SQLITE_UTF16LE)
@@ -12012,6 +12147,14 @@ typedef struct Walker Walker;
typedef struct WhereInfo WhereInfo;
typedef struct With With;
+/* A VList object records a mapping between parameters/variables/wildcards
+** in the SQL statement (such as $abc, @pqr, or :xyz) and the integer
+** variable number associated with that parameter. See the format description
+** on the sqlite3VListAdd() routine for more information. A VList is really
+** just an array of integers.
+*/
+typedef int VList;
+
/*
** Defer sourcing vdbe.h and btree.h until after the "u8" and
** "BusyHandler" typedefs. vdbe.h also requires a few of the opaque
@@ -12270,9 +12413,10 @@ SQLITE_PRIVATE int sqlite3BtreeCursorHasMoved(BtCursor*);
SQLITE_PRIVATE int sqlite3BtreeCursorRestore(BtCursor*, int*);
SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor*, u8 flags);
-/* Allowed flags for the 2nd argument to sqlite3BtreeDelete() */
+/* Allowed flags for sqlite3BtreeDelete() and sqlite3BtreeInsert() */
#define BTREE_SAVEPOSITION 0x02 /* Leave cursor pointing at NEXT or PREV */
#define BTREE_AUXDELETE 0x04 /* not the primary delete operation */
+#define BTREE_APPEND 0x08 /* Insert is likely an append */
/* An instance of the BtreePayload object describes the content of a single
** entry in either an index or table btree.
@@ -12296,27 +12440,29 @@ struct BtreePayload {
const void *pKey; /* Key content for indexes. NULL for tables */
sqlite3_int64 nKey; /* Size of pKey for indexes. PRIMARY KEY for tabs */
const void *pData; /* Data for tables. NULL for indexes */
+ struct Mem *aMem; /* First of nMem value in the unpacked pKey */
+ u16 nMem; /* Number of aMem[] value. Might be zero */
int nData; /* Size of pData. 0 if none. */
int nZero; /* Extra zero data appended after pData,nData */
};
SQLITE_PRIVATE int sqlite3BtreeInsert(BtCursor*, const BtreePayload *pPayload,
- int bias, int seekResult);
+ int flags, int seekResult);
SQLITE_PRIVATE int sqlite3BtreeFirst(BtCursor*, int *pRes);
SQLITE_PRIVATE int sqlite3BtreeLast(BtCursor*, int *pRes);
SQLITE_PRIVATE int sqlite3BtreeNext(BtCursor*, int *pRes);
SQLITE_PRIVATE int sqlite3BtreeEof(BtCursor*);
SQLITE_PRIVATE int sqlite3BtreePrevious(BtCursor*, int *pRes);
SQLITE_PRIVATE i64 sqlite3BtreeIntegerKey(BtCursor*);
-SQLITE_PRIVATE int sqlite3BtreeKey(BtCursor*, u32 offset, u32 amt, void*);
+SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor*, u32 offset, u32 amt, void*);
SQLITE_PRIVATE const void *sqlite3BtreePayloadFetch(BtCursor*, u32 *pAmt);
SQLITE_PRIVATE u32 sqlite3BtreePayloadSize(BtCursor*);
-SQLITE_PRIVATE int sqlite3BtreeData(BtCursor*, u32 offset, u32 amt, void*);
SQLITE_PRIVATE char *sqlite3BtreeIntegrityCheck(Btree*, int *aRoot, int nRoot, int, int*);
SQLITE_PRIVATE struct Pager *sqlite3BtreePager(Btree*);
#ifndef SQLITE_OMIT_INCRBLOB
+SQLITE_PRIVATE int sqlite3BtreePayloadChecked(BtCursor*, u32 offset, u32 amt, void*);
SQLITE_PRIVATE int sqlite3BtreePutData(BtCursor*, u32 offset, u32 amt, void*);
SQLITE_PRIVATE void sqlite3BtreeIncrblobCursor(BtCursor *);
#endif
@@ -12329,6 +12475,7 @@ SQLITE_PRIVATE int sqlite3HeaderSizeBtree(void);
#ifndef NDEBUG
SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor*);
#endif
+SQLITE_PRIVATE int sqlite3BtreeCursorIsValidNN(BtCursor*);
#ifndef SQLITE_OMIT_BTREECOUNT
SQLITE_PRIVATE int sqlite3BtreeCount(BtCursor *, i64 *);
@@ -12433,8 +12580,7 @@ typedef struct SubProgram SubProgram;
struct VdbeOp {
u8 opcode; /* What operation to perform */
signed char p4type; /* One of the P4_xxx constants for p4 */
- u8 notUsed1;
- u8 p5; /* Fifth parameter is an unsigned character */
+ u16 p5; /* Fifth parameter is an unsigned 16-bit integer */
int p1; /* First operand */
int p2; /* Second parameter (often the jump destination) */
int p3; /* The third parameter */
@@ -12502,22 +12648,21 @@ typedef struct VdbeOpList VdbeOpList;
#define P4_NOTUSED 0 /* The P4 parameter is not used */
#define P4_DYNAMIC (-1) /* Pointer to a string obtained from sqliteMalloc() */
#define P4_STATIC (-2) /* Pointer to a static string */
-#define P4_COLLSEQ (-4) /* P4 is a pointer to a CollSeq structure */
-#define P4_FUNCDEF (-5) /* P4 is a pointer to a FuncDef structure */
-#define P4_KEYINFO (-6) /* P4 is a pointer to a KeyInfo structure */
-#define P4_EXPR (-7) /* P4 is a pointer to an Expr tree */
-#define P4_MEM (-8) /* P4 is a pointer to a Mem* structure */
+#define P4_COLLSEQ (-3) /* P4 is a pointer to a CollSeq structure */
+#define P4_FUNCDEF (-4) /* P4 is a pointer to a FuncDef structure */
+#define P4_KEYINFO (-5) /* P4 is a pointer to a KeyInfo structure */
+#define P4_EXPR (-6) /* P4 is a pointer to an Expr tree */
+#define P4_MEM (-7) /* P4 is a pointer to a Mem* structure */
#define P4_TRANSIENT 0 /* P4 is a pointer to a transient string */
-#define P4_VTAB (-10) /* P4 is a pointer to an sqlite3_vtab structure */
-#define P4_MPRINTF (-11) /* P4 is a string obtained from sqlite3_mprintf() */
-#define P4_REAL (-12) /* P4 is a 64-bit floating point value */
-#define P4_INT64 (-13) /* P4 is a 64-bit signed integer */
-#define P4_INT32 (-14) /* P4 is a 32-bit signed integer */
-#define P4_INTARRAY (-15) /* P4 is a vector of 32-bit integers */
-#define P4_SUBPROGRAM (-18) /* P4 is a pointer to a SubProgram structure */
-#define P4_ADVANCE (-19) /* P4 is a pointer to BtreeNext() or BtreePrev() */
-#define P4_TABLE (-20) /* P4 is a pointer to a Table structure */
-#define P4_FUNCCTX (-21) /* P4 is a pointer to an sqlite3_context object */
+#define P4_VTAB (-8) /* P4 is a pointer to an sqlite3_vtab structure */
+#define P4_REAL (-9) /* P4 is a 64-bit floating point value */
+#define P4_INT64 (-10) /* P4 is a 64-bit signed integer */
+#define P4_INT32 (-11) /* P4 is a 32-bit signed integer */
+#define P4_INTARRAY (-12) /* P4 is a vector of 32-bit integers */
+#define P4_SUBPROGRAM (-13) /* P4 is a pointer to a SubProgram structure */
+#define P4_ADVANCE (-14) /* P4 is a pointer to BtreeNext() or BtreePrev() */
+#define P4_TABLE (-15) /* P4 is a pointer to a Table structure */
+#define P4_FUNCCTX (-16) /* P4 is a pointer to an sqlite3_context object */
/* Error message codes for OP_Halt */
#define P5_ConstraintNotNull 1
@@ -12627,7 +12772,7 @@ typedef struct VdbeOpList VdbeOpList;
#define OP_Program 64
#define OP_FkIfZero 65 /* synopsis: if fkctr[P1]==0 goto P2 */
#define OP_IfPos 66 /* synopsis: if r[P1]>0 then r[P1]-=P3, goto P2 */
-#define OP_IfNotZero 67 /* synopsis: if r[P1]!=0 then r[P1]-=P3, goto P2 */
+#define OP_IfNotZero 67 /* synopsis: if r[P1]!=0 then r[P1]--, goto P2 */
#define OP_DecrJumpZero 68 /* synopsis: if (--r[P1])==0 goto P2 */
#define OP_IncrVacuum 69
#define OP_VNext 70
@@ -12681,48 +12826,47 @@ typedef struct VdbeOpList VdbeOpList;
#define OP_ResetCount 118
#define OP_SorterCompare 119 /* synopsis: if key(P1)!=trim(r[P3],P4) goto P2 */
#define OP_SorterData 120 /* synopsis: r[P2]=data */
-#define OP_RowKey 121 /* synopsis: r[P2]=key */
-#define OP_RowData 122 /* synopsis: r[P2]=data */
-#define OP_Rowid 123 /* synopsis: r[P2]=rowid */
-#define OP_NullRow 124
-#define OP_SorterInsert 125
-#define OP_IdxInsert 126 /* synopsis: key=r[P2] */
-#define OP_IdxDelete 127 /* synopsis: key=r[P2@P3] */
-#define OP_Seek 128 /* synopsis: Move P3 to P1.rowid */
-#define OP_IdxRowid 129 /* synopsis: r[P2]=rowid */
-#define OP_Destroy 130
-#define OP_Clear 131
+#define OP_RowData 121 /* synopsis: r[P2]=data */
+#define OP_Rowid 122 /* synopsis: r[P2]=rowid */
+#define OP_NullRow 123
+#define OP_SorterInsert 124 /* synopsis: key=r[P2] */
+#define OP_IdxInsert 125 /* synopsis: key=r[P2] */
+#define OP_IdxDelete 126 /* synopsis: key=r[P2@P3] */
+#define OP_Seek 127 /* synopsis: Move P3 to P1.rowid */
+#define OP_IdxRowid 128 /* synopsis: r[P2]=rowid */
+#define OP_Destroy 129
+#define OP_Clear 130
+#define OP_ResetSorter 131
#define OP_Real 132 /* same as TK_FLOAT, synopsis: r[P2]=P4 */
-#define OP_ResetSorter 133
-#define OP_CreateIndex 134 /* synopsis: r[P2]=root iDb=P1 */
-#define OP_CreateTable 135 /* synopsis: r[P2]=root iDb=P1 */
-#define OP_ParseSchema 136
-#define OP_LoadAnalysis 137
-#define OP_DropTable 138
-#define OP_DropIndex 139
-#define OP_DropTrigger 140
-#define OP_IntegrityCk 141
-#define OP_RowSetAdd 142 /* synopsis: rowset(P1)=r[P2] */
-#define OP_Param 143
-#define OP_FkCounter 144 /* synopsis: fkctr[P1]+=P2 */
-#define OP_MemMax 145 /* synopsis: r[P1]=max(r[P1],r[P2]) */
-#define OP_OffsetLimit 146 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */
-#define OP_AggStep0 147 /* synopsis: accum=r[P3] step(r[P2@P5]) */
-#define OP_AggStep 148 /* synopsis: accum=r[P3] step(r[P2@P5]) */
-#define OP_AggFinal 149 /* synopsis: accum=r[P1] N=P2 */
-#define OP_Expire 150
-#define OP_TableLock 151 /* synopsis: iDb=P1 root=P2 write=P3 */
-#define OP_VBegin 152
-#define OP_VCreate 153
-#define OP_VDestroy 154
-#define OP_VOpen 155
-#define OP_VColumn 156 /* synopsis: r[P3]=vcolumn(P2) */
-#define OP_VRename 157
-#define OP_Pagecount 158
-#define OP_MaxPgcnt 159
-#define OP_CursorHint 160
-#define OP_Noop 161
-#define OP_Explain 162
+#define OP_CreateIndex 133 /* synopsis: r[P2]=root iDb=P1 */
+#define OP_CreateTable 134 /* synopsis: r[P2]=root iDb=P1 */
+#define OP_ParseSchema 135
+#define OP_LoadAnalysis 136
+#define OP_DropTable 137
+#define OP_DropIndex 138
+#define OP_DropTrigger 139
+#define OP_IntegrityCk 140
+#define OP_RowSetAdd 141 /* synopsis: rowset(P1)=r[P2] */
+#define OP_Param 142
+#define OP_FkCounter 143 /* synopsis: fkctr[P1]+=P2 */
+#define OP_MemMax 144 /* synopsis: r[P1]=max(r[P1],r[P2]) */
+#define OP_OffsetLimit 145 /* synopsis: if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1) */
+#define OP_AggStep0 146 /* synopsis: accum=r[P3] step(r[P2@P5]) */
+#define OP_AggStep 147 /* synopsis: accum=r[P3] step(r[P2@P5]) */
+#define OP_AggFinal 148 /* synopsis: accum=r[P1] N=P2 */
+#define OP_Expire 149
+#define OP_TableLock 150 /* synopsis: iDb=P1 root=P2 write=P3 */
+#define OP_VBegin 151
+#define OP_VCreate 152
+#define OP_VDestroy 153
+#define OP_VOpen 154
+#define OP_VColumn 155 /* synopsis: r[P3]=vcolumn(P2) */
+#define OP_VRename 156
+#define OP_Pagecount 157
+#define OP_MaxPgcnt 158
+#define OP_CursorHint 159
+#define OP_Noop 160
+#define OP_Explain 161
/* Properties such as "out2" or "jump" that are specified in
** comments following the "case" for each opcode in the vdbe.c
@@ -12750,12 +12894,12 @@ typedef struct VdbeOpList VdbeOpList;
/* 96 */ 0x00, 0x10, 0x00, 0x00, 0x10, 0x10, 0x00, 0x00,\
/* 104 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
/* 112 */ 0x00, 0x10, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 120 */ 0x00, 0x00, 0x00, 0x10, 0x00, 0x04, 0x04, 0x00,\
-/* 128 */ 0x00, 0x10, 0x10, 0x00, 0x10, 0x00, 0x10, 0x10,\
-/* 136 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x10,\
-/* 144 */ 0x00, 0x04, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00,\
-/* 152 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10,\
-/* 160 */ 0x00, 0x00, 0x00,}
+/* 120 */ 0x00, 0x00, 0x10, 0x00, 0x04, 0x04, 0x00, 0x00,\
+/* 128 */ 0x10, 0x10, 0x00, 0x00, 0x10, 0x10, 0x10, 0x00,\
+/* 136 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x10, 0x00,\
+/* 144 */ 0x04, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,\
+/* 152 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, 0x00,\
+/* 160 */ 0x00, 0x00,}
/* The sqlite3P2Values() routine is able to run faster if it knows
** the value of the largest JUMP opcode. The smaller the maximum
@@ -12786,8 +12930,10 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(Vdbe*,int,int,int,int,int);
SQLITE_PRIVATE void sqlite3VdbeEndCoroutine(Vdbe*,int);
#if defined(SQLITE_DEBUG) && !defined(SQLITE_TEST_REALLOC_STRESS)
SQLITE_PRIVATE void sqlite3VdbeVerifyNoMallocRequired(Vdbe *p, int N);
+SQLITE_PRIVATE void sqlite3VdbeVerifyNoResultRow(Vdbe *p);
#else
# define sqlite3VdbeVerifyNoMallocRequired(A,B)
+# define sqlite3VdbeVerifyNoResultRow(A)
#endif
SQLITE_PRIVATE VdbeOp *sqlite3VdbeAddOpList(Vdbe*, int nOp, VdbeOpList const *aOp, int iLineno);
SQLITE_PRIVATE void sqlite3VdbeAddParseSchemaOp(Vdbe*,int,char*);
@@ -12795,11 +12941,12 @@ SQLITE_PRIVATE void sqlite3VdbeChangeOpcode(Vdbe*, u32 addr, u8);
SQLITE_PRIVATE void sqlite3VdbeChangeP1(Vdbe*, u32 addr, int P1);
SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe*, u32 addr, int P2);
SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe*, u32 addr, int P3);
-SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe*, u8 P5);
+SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe*, u16 P5);
SQLITE_PRIVATE void sqlite3VdbeJumpHere(Vdbe*, int addr);
SQLITE_PRIVATE int sqlite3VdbeChangeToNoop(Vdbe*, int addr);
SQLITE_PRIVATE int sqlite3VdbeDeletePriorOpcode(Vdbe*, u8 op);
SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe*, int addr, const char *zP4, int N);
+SQLITE_PRIVATE void sqlite3VdbeAppendP4(Vdbe*, void *pP4, int p4type);
SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse*, Index*);
SQLITE_PRIVATE void sqlite3VdbeUsesBtree(Vdbe*, int);
SQLITE_PRIVATE VdbeOp *sqlite3VdbeGetOp(Vdbe*, int);
@@ -12835,7 +12982,7 @@ SQLITE_PRIVATE int sqlite3MemCompare(const Mem*, const Mem*, const CollSeq*);
SQLITE_PRIVATE void sqlite3VdbeRecordUnpack(KeyInfo*,int,const void*,UnpackedRecord*);
SQLITE_PRIVATE int sqlite3VdbeRecordCompare(int,const void*,UnpackedRecord*);
SQLITE_PRIVATE int sqlite3VdbeRecordCompareWithSkip(int, const void *, UnpackedRecord *, int);
-SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(KeyInfo *, char *, int, char **);
+SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(KeyInfo*);
typedef int (*RecordCompare)(int,const void*,UnpackedRecord*);
SQLITE_PRIVATE RecordCompare sqlite3VdbeFindCompare(UnpackedRecord*);
@@ -13040,7 +13187,7 @@ SQLITE_PRIVATE int sqlite3PagerOpen(
int,
void(*)(DbPage*)
);
-SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager);
+SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager, sqlite3*);
SQLITE_PRIVATE int sqlite3PagerReadFileheader(Pager*, int, unsigned char*);
/* Functions used to configure a Pager object. */
@@ -13091,18 +13238,21 @@ SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint);
SQLITE_PRIVATE int sqlite3PagerSharedLock(Pager *pPager);
#ifndef SQLITE_OMIT_WAL
-SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, int, int*, int*);
+SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, sqlite3*, int, int*, int*);
SQLITE_PRIVATE int sqlite3PagerWalSupported(Pager *pPager);
SQLITE_PRIVATE int sqlite3PagerWalCallback(Pager *pPager);
SQLITE_PRIVATE int sqlite3PagerOpenWal(Pager *pPager, int *pisOpen);
-SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager);
-SQLITE_PRIVATE int sqlite3PagerUseWal(Pager *pPager);
+SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager, sqlite3*);
+# ifdef SQLITE_DIRECT_OVERFLOW_READ
+SQLITE_PRIVATE int sqlite3PagerUseWal(Pager *pPager, Pgno);
+# endif
# ifdef SQLITE_ENABLE_SNAPSHOT
SQLITE_PRIVATE int sqlite3PagerSnapshotGet(Pager *pPager, sqlite3_snapshot **ppSnapshot);
SQLITE_PRIVATE int sqlite3PagerSnapshotOpen(Pager *pPager, sqlite3_snapshot *pSnapshot);
+SQLITE_PRIVATE int sqlite3PagerSnapshotRecover(Pager *pPager);
# endif
#else
-# define sqlite3PagerUseWal(x) 0
+# define sqlite3PagerUseWal(x,y) 0
#endif
#ifdef SQLITE_ENABLE_ZIPVFS
@@ -13926,6 +14076,7 @@ struct sqlite3 {
u8 vtabOnConflict; /* Value to return for s3_vtab_on_conflict() */
u8 isTransactionSavepoint; /* True if the outermost savepoint is a TS */
u8 mTrace; /* zero or more SQLITE_TRACE flags */
+ u8 skipBtreeMutex; /* True if no shared-cache backends */
int nextPagesize; /* Pagesize after VACUUM if >0 */
u32 magic; /* Magic number for detect library misuse */
int nChange; /* Value returned by sqlite3_changes() */
@@ -14073,6 +14224,7 @@ struct sqlite3 {
#define SQLITE_Vacuum 0x10000000 /* Currently in a VACUUM */
#define SQLITE_CellSizeCk 0x20000000 /* Check btree cell sizes on load */
#define SQLITE_Fts3Tokenizer 0x40000000 /* Enable fts3_tokenizer(2) */
+#define SQLITE_NoCkptOnClose 0x80000000 /* No checkpoint on close()/DETACH */
/*
@@ -14098,13 +14250,8 @@ struct sqlite3 {
/*
** Macros for testing whether or not optimizations are enabled or disabled.
*/
-#ifndef SQLITE_OMIT_BUILTIN_TEST
#define OptimizationDisabled(db, mask) (((db)->dbOptFlags&(mask))!=0)
#define OptimizationEnabled(db, mask) (((db)->dbOptFlags&(mask))==0)
-#else
-#define OptimizationDisabled(db, mask) 0
-#define OptimizationEnabled(db, mask) 1
-#endif
/*
** Return true if it OK to factor constant expressions into the initialization
@@ -14195,6 +14342,7 @@ struct FuncDestructor {
#define SQLITE_FUNC_MINMAX 0x1000 /* True for min() and max() aggregates */
#define SQLITE_FUNC_SLOCHNG 0x2000 /* "Slow Change". Value constant during a
** single query - might change over time */
+#define SQLITE_FUNC_AFFINITY 0x4000 /* Built-in affinity() function */
/*
** The following three macros, FUNCTION(), LIKEFUNC() and AGGREGATE() are
@@ -14443,9 +14591,9 @@ struct Table {
ExprList *pCheck; /* All CHECK constraints */
/* ... also used as column name list in a VIEW */
int tnum; /* Root BTree page for this table */
+ u32 nTabRef; /* Number of pointers to this Table */
i16 iPKey; /* If not negative, use aCol[iPKey] as the rowid */
i16 nCol; /* Number of columns in this table */
- u16 nRef; /* Number of pointers to this Table */
LogEst nRowLogEst; /* Estimated rows in table - from sqlite_stat1 table */
LogEst szTabRow; /* Estimated size of each table row in bytes */
#ifdef SQLITE_ENABLE_COSTMULT
@@ -15201,7 +15349,7 @@ struct SrcList {
#define WHERE_SORTBYGROUP 0x0200 /* Support sqlite3WhereIsSorted() */
#define WHERE_SEEK_TABLE 0x0400 /* Do not defer seeks on main table */
#define WHERE_ORDERBY_LIMIT 0x0800 /* ORDERBY+LIMIT on the inner loop */
- /* 0x1000 not currently used */
+#define WHERE_SEEK_UNIQ_TABLE 0x1000 /* Do not defer seeks if unique */
/* 0x2000 not currently used */
#define WHERE_USE_LIMIT 0x4000 /* Use the LIMIT in cost estimates */
/* 0x8000 not currently used */
@@ -15584,17 +15732,16 @@ struct Parse {
} aColCache[SQLITE_N_COLCACHE]; /* One for each column cache entry */
int aTempReg[8]; /* Holding area for temporary registers */
Token sNameToken; /* Token with unqualified schema object name */
- Token sLastToken; /* The last token parsed */
/************************************************************************
** Above is constant between recursions. Below is reset before and after
** each recursion. The boundary between these two regions is determined
- ** using offsetof(Parse,nVar) so the nVar field must be the first field
- ** in the recursive region.
+ ** using offsetof(Parse,sLastToken) so the sLastToken field must be the
+ ** first field in the recursive region.
************************************************************************/
+ Token sLastToken; /* The last token parsed */
ynVar nVar; /* Number of '?' variables seen in the SQL so far */
- int nzVar; /* Number of available slots in azVar[] */
u8 iPkSortOrder; /* ASC or DESC for INTEGER PRIMARY KEY */
u8 explain; /* True if the EXPLAIN flag is found on the query */
#ifndef SQLITE_OMIT_VIRTUALTABLE
@@ -15606,7 +15753,7 @@ struct Parse {
int iSelectId; /* ID of current select for EXPLAIN output */
int iNextSelectId; /* Next available select ID for EXPLAIN output */
#endif
- char **azVar; /* Pointers to names of parameters */
+ VList *pVList; /* Mapping between variable names and numbers */
Vdbe *pReprepare; /* VM being reprepared (sqlite3Reprepare()) */
const char *zTail; /* All SQL text past the last semicolon parsed */
Table *pNewTable; /* A table being constructed by CREATE TABLE */
@@ -15626,7 +15773,7 @@ struct Parse {
** Sizes and pointers of various parts of the Parse object.
*/
#define PARSE_HDR_SZ offsetof(Parse,aColCache) /* Recursive part w/o aColCache*/
-#define PARSE_RECURSE_SZ offsetof(Parse,nVar) /* Recursive part */
+#define PARSE_RECURSE_SZ offsetof(Parse,sLastToken) /* Recursive part */
#define PARSE_TAIL_SZ (sizeof(Parse)-PARSE_RECURSE_SZ) /* Non-recursive part */
#define PARSE_TAIL(X) (((char*)(X))+PARSE_RECURSE_SZ) /* Pointer to tail */
@@ -15663,13 +15810,11 @@ struct AuthContext {
#define OPFLAG_NCHANGE 0x01 /* OP_Insert: Set to update db->nChange */
/* Also used in P2 (not P5) of OP_Delete */
#define OPFLAG_EPHEM 0x01 /* OP_Column: Ephemeral output is ok */
-#define OPFLAG_LASTROWID 0x02 /* Set to update db->lastRowid */
+#define OPFLAG_LASTROWID 0x20 /* Set to update db->lastRowid */
#define OPFLAG_ISUPDATE 0x04 /* This OP_Insert is an sql UPDATE */
#define OPFLAG_APPEND 0x08 /* This is likely to be an append */
#define OPFLAG_USESEEKRESULT 0x10 /* Try to avoid a seek in BtreeInsert() */
-#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
#define OPFLAG_ISNOOP 0x40 /* OP_Delete does pre-update-hook only */
-#endif
#define OPFLAG_LENGTHARG 0x40 /* OP_Column only used for length() */
#define OPFLAG_TYPEOFARG 0x80 /* OP_Column only used for typeof() */
#define OPFLAG_BULKCSR 0x01 /* OP_Open** used to open bulk cursor */
@@ -15677,7 +15822,7 @@ struct AuthContext {
#define OPFLAG_FORDELETE 0x08 /* OP_Open should use BTREE_FORDELETE */
#define OPFLAG_P2ISREG 0x10 /* P2 to OP_Open** is a register number */
#define OPFLAG_PERMUTE 0x01 /* OP_Compare: use the permutation */
-#define OPFLAG_SAVEPOSITION 0x02 /* OP_Delete: keep cursor position */
+#define OPFLAG_SAVEPOSITION 0x02 /* OP_Delete/Insert: save cursor pos */
#define OPFLAG_AUXDELETE 0x04 /* OP_Delete: index in a DELETE op */
/*
@@ -15874,7 +16019,7 @@ struct Sqlite3Config {
void (*xVdbeBranch)(void*,int iSrcLine,u8 eThis,u8 eMx); /* Callback */
void *pVdbeBranchArg; /* 1st argument */
#endif
-#ifndef SQLITE_OMIT_BUILTIN_TEST
+#ifndef SQLITE_UNTESTABLE
int (*xTestCallback)(int); /* Invoked by sqlite3FaultSim() */
#endif
int bLocaltimeFault; /* True to fail localtime() calls */
@@ -16078,7 +16223,7 @@ SQLITE_PRIVATE void sqlite3ScratchFree(void*);
SQLITE_PRIVATE void *sqlite3PageMalloc(int);
SQLITE_PRIVATE void sqlite3PageFree(void*);
SQLITE_PRIVATE void sqlite3MemSetDefault(void);
-#ifndef SQLITE_OMIT_BUILTIN_TEST
+#ifndef SQLITE_UNTESTABLE
SQLITE_PRIVATE void sqlite3BenignMallocHooks(void (*)(void), void (*)(void));
#endif
SQLITE_PRIVATE int sqlite3HeapNearlyFull(void);
@@ -16189,7 +16334,7 @@ SQLITE_PRIVATE int sqlite3NoTempsInRange(Parse*,int,int);
SQLITE_PRIVATE Expr *sqlite3ExprAlloc(sqlite3*,int,const Token*,int);
SQLITE_PRIVATE Expr *sqlite3Expr(sqlite3*,int,const char*);
SQLITE_PRIVATE void sqlite3ExprAttachSubtrees(sqlite3*,Expr*,Expr*,Expr*);
-SQLITE_PRIVATE Expr *sqlite3PExpr(Parse*, int, Expr*, Expr*, const Token*);
+SQLITE_PRIVATE Expr *sqlite3PExpr(Parse*, int, Expr*, Expr*);
SQLITE_PRIVATE void sqlite3PExprAddSelect(Parse*, Expr*, Select*);
SQLITE_PRIVATE Expr *sqlite3ExprAnd(sqlite3*,Expr*, Expr*);
SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse*,ExprList*, Token*);
@@ -16205,6 +16350,9 @@ SQLITE_PRIVATE u32 sqlite3ExprListFlags(const ExprList*);
SQLITE_PRIVATE int sqlite3Init(sqlite3*, char**);
SQLITE_PRIVATE int sqlite3InitCallback(void*, int, char**, char**);
SQLITE_PRIVATE void sqlite3Pragma(Parse*,Token*,Token*,Token*,int);
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+SQLITE_PRIVATE Module *sqlite3PragmaVtabRegister(sqlite3*,const char *zName);
+#endif
SQLITE_PRIVATE void sqlite3ResetAllSchemasOfConnection(sqlite3*);
SQLITE_PRIVATE void sqlite3ResetOneSchema(sqlite3*,int);
SQLITE_PRIVATE void sqlite3CollapseDatabaseArray(sqlite3*);
@@ -16233,7 +16381,7 @@ SQLITE_PRIVATE int sqlite3ParseUri(const char*,const char*,unsigned int*,
sqlite3_vfs**,char**,char **);
SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3*,const char*);
-#ifdef SQLITE_OMIT_BUILTIN_TEST
+#ifdef SQLITE_UNTESTABLE
# define sqlite3FaultSim(X) SQLITE_OK
#else
SQLITE_PRIVATE int sqlite3FaultSim(int);
@@ -16246,7 +16394,7 @@ SQLITE_PRIVATE int sqlite3BitvecSet(Bitvec*, u32);
SQLITE_PRIVATE void sqlite3BitvecClear(Bitvec*, u32, void*);
SQLITE_PRIVATE void sqlite3BitvecDestroy(Bitvec*);
SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec*);
-#ifndef SQLITE_OMIT_BUILTIN_TEST
+#ifndef SQLITE_UNTESTABLE
SQLITE_PRIVATE int sqlite3BitvecBuiltinTest(int,int*);
#endif
@@ -16335,7 +16483,7 @@ SQLITE_PRIVATE void sqlite3ExprCacheAffinityChange(Parse*, int, int);
SQLITE_PRIVATE void sqlite3ExprCode(Parse*, Expr*, int);
SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse*, Expr*, int);
SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse*, Expr*, int);
-SQLITE_PRIVATE void sqlite3ExprCodeAtInit(Parse*, Expr*, int, u8);
+SQLITE_PRIVATE int sqlite3ExprCodeAtInit(Parse*, Expr*, int);
SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse*, Expr*, int*);
SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse*, Expr*, int);
SQLITE_PRIVATE void sqlite3ExprCodeAndCache(Parse*, Expr*, int);
@@ -16343,6 +16491,7 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList(Parse*, ExprList*, int, int, u8);
#define SQLITE_ECEL_DUP 0x01 /* Deep, not shallow copies */
#define SQLITE_ECEL_FACTOR 0x02 /* Factor out constant terms */
#define SQLITE_ECEL_REF 0x04 /* Use ExprList.u.x.iOrderByCol */
+#define SQLITE_ECEL_OMITREF 0x08 /* Omit if ExprList.u.x.iOrderByCol */
SQLITE_PRIVATE void sqlite3ExprIfTrue(Parse*, Expr*, int, int);
SQLITE_PRIVATE void sqlite3ExprIfFalse(Parse*, Expr*, int, int);
SQLITE_PRIVATE void sqlite3ExprIfFalseDup(Parse*, Expr*, int, int);
@@ -16365,7 +16514,7 @@ SQLITE_PRIVATE void sqlite3ExprAnalyzeAggList(NameContext*,ExprList*);
SQLITE_PRIVATE int sqlite3ExprCoveredByIndex(Expr*, int iCur, Index *pIdx);
SQLITE_PRIVATE int sqlite3FunctionUsesThisSrc(Expr*, SrcList*);
SQLITE_PRIVATE Vdbe *sqlite3GetVdbe(Parse*);
-#ifndef SQLITE_OMIT_BUILTIN_TEST
+#ifndef SQLITE_UNTESTABLE
SQLITE_PRIVATE void sqlite3PrngSaveState(void);
SQLITE_PRIVATE void sqlite3PrngRestoreState(void);
#endif
@@ -16396,6 +16545,11 @@ SQLITE_PRIVATE int sqlite3GenerateIndexKey(Parse*, Index*, int, int, int, int*,I
SQLITE_PRIVATE void sqlite3ResolvePartIdxLabel(Parse*,int);
SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(Parse*,Table*,int*,int,int,int,int,
u8,u8,int,int*,int*);
+#ifdef SQLITE_ENABLE_NULL_TRIM
+SQLITE_PRIVATE void sqlite3SetMakeRecordP5(Vdbe*,Table*);
+#else
+# define sqlite3SetMakeRecordP5(A,B)
+#endif
SQLITE_PRIVATE void sqlite3CompleteInsertion(Parse*,Table*,int,int,int,int*,int,int,int);
SQLITE_PRIVATE int sqlite3OpenTableAndIndices(Parse*, Table*, int, u8, int, u8*, int*, int*);
SQLITE_PRIVATE void sqlite3BeginWriteOperation(Parse*, int, int);
@@ -16502,6 +16656,9 @@ SQLITE_PRIVATE LogEst sqlite3LogEstFromDouble(double);
defined(SQLITE_EXPLAIN_ESTIMATED_ROWS)
SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst);
#endif
+SQLITE_PRIVATE VList *sqlite3VListAdd(sqlite3*,VList*,const char*,int,int);
+SQLITE_PRIVATE const char *sqlite3VListNumToName(VList*,int);
+SQLITE_PRIVATE int sqlite3VListNameToNum(VList*,const char*,int);
/*
** Routines to read and write variable-length integers. These used to
@@ -16671,8 +16828,10 @@ SQLITE_PRIVATE char sqlite3IndexColumnAffinity(sqlite3*, Index*, int);
/*
** The interface to the LEMON-generated parser
*/
-SQLITE_PRIVATE void *sqlite3ParserAlloc(void*(*)(u64));
-SQLITE_PRIVATE void sqlite3ParserFree(void*, void(*)(void*));
+#ifndef SQLITE_AMALGAMATION
+SQLITE_PRIVATE void *sqlite3ParserAlloc(void*(*)(u64));
+SQLITE_PRIVATE void sqlite3ParserFree(void*, void(*)(void*));
+#endif
SQLITE_PRIVATE void sqlite3Parser(void*, int, Token, Parse*);
#ifdef YYTRACKMAXSTACKDEPTH
SQLITE_PRIVATE int sqlite3ParserStackPeak(void*);
@@ -16718,6 +16877,13 @@ SQLITE_PRIVATE void sqlite3VtabUnlockList(sqlite3*);
SQLITE_PRIVATE int sqlite3VtabSavepoint(sqlite3 *, int, int);
SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe*, sqlite3_vtab*);
SQLITE_PRIVATE VTable *sqlite3GetVTable(sqlite3*, Table*);
+SQLITE_PRIVATE Module *sqlite3VtabCreateModule(
+ sqlite3*,
+ const char*,
+ const sqlite3_module*,
+ void*,
+ void(*)(void*)
+ );
# define sqlite3VtabInSync(db) ((db)->nVTrans>0 && (db)->aVTrans==0)
#endif
SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse*,Module*);
@@ -16775,6 +16941,7 @@ SQLITE_PRIVATE FKey *sqlite3FkReferences(Table *);
#define sqlite3FkDropTable(a,b,c)
#define sqlite3FkOldmask(a,b) 0
#define sqlite3FkRequired(a,b,c,d) 0
+ #define sqlite3FkReferences(a) 0
#endif
#ifndef SQLITE_OMIT_FOREIGN_KEY
SQLITE_PRIVATE void sqlite3FkDelete(sqlite3 *, Table*);
@@ -16793,10 +16960,10 @@ SQLITE_PRIVATE int sqlite3FkLocateIndex(Parse*,Table*,FKey*,Index**,int**);
/*
** The interface to the code in fault.c used for identifying "benign"
-** malloc failures. This is only present if SQLITE_OMIT_BUILTIN_TEST
+** malloc failures. This is only present if SQLITE_UNTESTABLE
** is not defined.
*/
-#ifndef SQLITE_OMIT_BUILTIN_TEST
+#ifndef SQLITE_UNTESTABLE
SQLITE_PRIVATE void sqlite3BeginBenignMalloc(void);
SQLITE_PRIVATE void sqlite3EndBenignMalloc(void);
#else
@@ -16927,6 +17094,7 @@ SQLITE_PRIVATE int sqlite3ExprVectorSize(Expr *pExpr);
SQLITE_PRIVATE int sqlite3ExprIsVector(Expr *pExpr);
SQLITE_PRIVATE Expr *sqlite3VectorFieldSubexpr(Expr*, int);
SQLITE_PRIVATE Expr *sqlite3ExprForVectorField(Parse*,Expr*,int);
+SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse*, Expr*);
#endif /* SQLITEINT_H */
@@ -17104,6 +17272,19 @@ SQLITE_PRIVATE const unsigned char sqlite3CtypeMap[256] = {
#endif
/*
+** The default lookaside-configuration, in the format "SZ,N".  SZ is the
+** number of bytes in each lookaside slot (should be a multiple of 8)
+** and N is the number of slots. The lookaside-configuration can be
+** changed at start-time using sqlite3_config(SQLITE_CONFIG_LOOKASIDE)
+** or at run-time for an individual database connection using
+** sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE);
+*/
+#ifndef SQLITE_DEFAULT_LOOKASIDE
+# define SQLITE_DEFAULT_LOOKASIDE 1200,100
+#endif
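A minimal sketch of how the "SZ,N" pair is consumed (the 512/64 values are arbitrary
illustrations, not values taken from this change): the same two numbers can also be
supplied through the documented configuration interfaces, where a NULL buffer lets
SQLite allocate the lookaside memory itself.

    sqlite3_config(SQLITE_CONFIG_LOOKASIDE, 512, 64);                /* library-wide default */
    sqlite3_db_config(db, SQLITE_DBCONFIG_LOOKASIDE, NULL, 512, 64); /* one connection only   */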
+
+
+/*
** The following singleton contains the global configuration for
** the SQLite library.
*/
@@ -17115,8 +17296,7 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = {
SQLITE_ALLOW_COVERING_INDEX_SCAN, /* bUseCis */
0x7ffffffe, /* mxStrlen */
0, /* neverCorrupt */
- 128, /* szLookaside */
- 500, /* nLookaside */
+ SQLITE_DEFAULT_LOOKASIDE, /* szLookaside, nLookaside */
SQLITE_STMTJRNL_SPILL, /* nStmtSpill */
{0,0,0,0,0,0,0,0}, /* m */
{0,0,0,0,0,0,0,0,0}, /* mutex */
@@ -17153,7 +17333,7 @@ SQLITE_PRIVATE SQLITE_WSD struct Sqlite3Config sqlite3Config = {
0, /* xVdbeBranch */
0, /* pVbeBranchArg */
#endif
-#ifndef SQLITE_OMIT_BUILTIN_TEST
+#ifndef SQLITE_UNTESTABLE
0, /* xTestCallback */
#endif
0, /* bLocaltimeFault */
@@ -17282,6 +17462,9 @@ static const char * const azCompileOpt[] = {
#if defined(SQLITE_DEFAULT_MMAP_SIZE) && !defined(SQLITE_DEFAULT_MMAP_SIZE_xc)
"DEFAULT_MMAP_SIZE=" CTIMEOPT_VAL(SQLITE_DEFAULT_MMAP_SIZE),
#endif
+#if SQLITE_DIRECT_OVERFLOW_READ
+ "DIRECT_OVERFLOW_READ",
+#endif
#if SQLITE_DISABLE_DIRSYNC
"DISABLE_DIRSYNC",
#endif
@@ -17368,6 +17551,9 @@ static const char * const azCompileOpt[] = {
#if SQLITE_ENABLE_UPDATE_DELETE_LIMIT
"ENABLE_UPDATE_DELETE_LIMIT",
#endif
+#if defined(SQLITE_ENABLE_URI_00_ERROR)
+ "ENABLE_URI_00_ERROR",
+#endif
#if SQLITE_HAS_CODEC
"HAS_CODEC",
#endif
@@ -17443,9 +17629,6 @@ static const char * const azCompileOpt[] = {
#if SQLITE_OMIT_BTREECOUNT
"OMIT_BTREECOUNT",
#endif
-#if SQLITE_OMIT_BUILTIN_TEST
- "OMIT_BUILTIN_TEST",
-#endif
#if SQLITE_OMIT_CAST
"OMIT_CAST",
#endif
@@ -17608,6 +17791,9 @@ static const char * const azCompileOpt[] = {
#if defined(SQLITE_THREADSAFE)
"THREADSAFE=" CTIMEOPT_VAL(SQLITE_THREADSAFE),
#endif
+#if SQLITE_UNTESTABLE
+ "UNTESTABLE"
+#endif
#if SQLITE_USE_ALLOCA
"USE_ALLOCA",
#endif
@@ -17761,57 +17947,60 @@ typedef struct AuxData AuxData;
*/
typedef struct VdbeCursor VdbeCursor;
struct VdbeCursor {
- u8 eCurType; /* One of the CURTYPE_* values above */
- i8 iDb; /* Index of cursor database in db->aDb[] (or -1) */
- u8 nullRow; /* True if pointing to a row with no data */
- u8 deferredMoveto; /* A call to sqlite3BtreeMoveto() is needed */
- u8 isTable; /* True for rowid tables. False for indexes */
+ u8 eCurType; /* One of the CURTYPE_* values above */
+ i8 iDb; /* Index of cursor database in db->aDb[] (or -1) */
+ u8 nullRow; /* True if pointing to a row with no data */
+ u8 deferredMoveto; /* A call to sqlite3BtreeMoveto() is needed */
+ u8 isTable; /* True for rowid tables. False for indexes */
#ifdef SQLITE_DEBUG
- u8 seekOp; /* Most recent seek operation on this cursor */
- u8 wrFlag; /* The wrFlag argument to sqlite3BtreeCursor() */
-#endif
- Bool isEphemeral:1; /* True for an ephemeral table */
- Bool useRandomRowid:1;/* Generate new record numbers semi-randomly */
- Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */
- Pgno pgnoRoot; /* Root page of the open btree cursor */
- i16 nField; /* Number of fields in the header */
- u16 nHdrParsed; /* Number of header fields parsed so far */
+ u8 seekOp; /* Most recent seek operation on this cursor */
+ u8 wrFlag; /* The wrFlag argument to sqlite3BtreeCursor() */
+#endif
+ Bool isEphemeral:1; /* True for an ephemeral table */
+ Bool useRandomRowid:1; /* Generate new record numbers semi-randomly */
+ Bool isOrdered:1; /* True if the table is not BTREE_UNORDERED */
+ Btree *pBtx; /* Separate file holding temporary table */
+ i64 seqCount; /* Sequence counter */
+ int *aAltMap; /* Mapping from table to index column numbers */
+
+ /* Cached OP_Column parse information is only valid if cacheStatus matches
+ ** Vdbe.cacheCtr. Vdbe.cacheCtr will never take on the value of
+ ** CACHE_STALE (0) and so setting cacheStatus=CACHE_STALE guarantees that
+ ** the cache is out of date. */
+ u32 cacheStatus; /* Cache is valid if this matches Vdbe.cacheCtr */
+ int seekResult; /* Result of previous sqlite3BtreeMoveto() or 0
+ ** if there have been no prior seeks on the cursor. */
+ /* NB: seekResult does not distinguish between "no seeks have ever occurred
+ ** on this cursor" and "the most recent seek was an exact match". */
+
+ /* When a new VdbeCursor is allocated, only the fields above are zeroed.
+ ** The fields that follow are uninitialized, and must be individually
+ ** initialized prior to first use. */
+ VdbeCursor *pAltCursor; /* Associated index cursor from which to read */
union {
BtCursor *pCursor; /* CURTYPE_BTREE. Btree cursor */
sqlite3_vtab_cursor *pVCur; /* CURTYPE_VTAB. Vtab cursor */
int pseudoTableReg; /* CURTYPE_PSEUDO. Reg holding content. */
VdbeSorter *pSorter; /* CURTYPE_SORTER. Sorter object */
} uc;
- Btree *pBt; /* Separate file holding temporary table */
- KeyInfo *pKeyInfo; /* Info about index keys needed by index cursors */
- int seekResult; /* Result of previous sqlite3BtreeMoveto() */
- i64 seqCount; /* Sequence counter */
- i64 movetoTarget; /* Argument to the deferred sqlite3BtreeMoveto() */
- VdbeCursor *pAltCursor; /* Associated index cursor from which to read */
- int *aAltMap; /* Mapping from table to index column numbers */
+ KeyInfo *pKeyInfo; /* Info about index keys needed by index cursors */
+ u32 iHdrOffset; /* Offset to next unparsed byte of the header */
+ Pgno pgnoRoot; /* Root page of the open btree cursor */
+ i16 nField; /* Number of fields in the header */
+ u16 nHdrParsed; /* Number of header fields parsed so far */
+ i64 movetoTarget; /* Argument to the deferred sqlite3BtreeMoveto() */
+ u32 *aOffset; /* Pointer to aType[nField] */
+ const u8 *aRow; /* Data for the current row, if all on one page */
+ u32 payloadSize; /* Total number of bytes in the record */
+  u32 szRow;              /* Bytes available in aRow */
#ifdef SQLITE_ENABLE_COLUMN_USED_MASK
- u64 maskUsed; /* Mask of columns used by this cursor */
+ u64 maskUsed; /* Mask of columns used by this cursor */
#endif
- /* Cached information about the header for the data record that the
- ** cursor is currently pointing to. Only valid if cacheStatus matches
- ** Vdbe.cacheCtr. Vdbe.cacheCtr will never take on the value of
- ** CACHE_STALE and so setting cacheStatus=CACHE_STALE guarantees that
- ** the cache is out of date.
- **
- ** aRow might point to (ephemeral) data for the current row, or it might
- ** be NULL.
- */
- u32 cacheStatus; /* Cache is valid if this matches Vdbe.cacheCtr */
- u32 payloadSize; /* Total number of bytes in the record */
- u32 szRow; /* Byte available in aRow */
- u32 iHdrOffset; /* Offset to next unparsed byte of the header */
- const u8 *aRow; /* Data for the current row, if all on one page */
- u32 *aOffset; /* Pointer to aType[nField] */
- u32 aType[1]; /* Type values for all entries in the record */
/* 2*nField extra array elements allocated for aType[], beyond the one
** static element declared in the structure. nField total array slots for
** aType[] and nField+1 array slots for aOffset[] */
+  u32 aType[1];           /* Type values used to decode the record. MUST BE LAST */
};
@@ -18031,7 +18220,6 @@ struct Vdbe {
Vdbe *pPrev,*pNext; /* Linked list of VDBEs with the same Vdbe.db */
Parse *pParse; /* Parsing context used to create this Vdbe */
ynVar nVar; /* Number of entries in aVar[] */
- ynVar nzVar; /* Number of entries in azVar[] */
u32 magic; /* Magic number for sanity checking */
int nMem; /* Number of memory locations currently allocated */
int nCursor; /* Number of slots in apCsr[] */
@@ -18056,7 +18244,7 @@ struct Vdbe {
char *zErrMsg; /* Error message written here */
VdbeCursor **apCsr; /* One element of this array for each open cursor */
Mem *aVar; /* Values for the OP_Variable opcode. */
- char **azVar; /* Name of variables */
+ VList *pVList; /* Name of variables */
#ifndef SQLITE_OMIT_TRACE
i64 startTime; /* Time when query started - used for profiling */
#endif
@@ -18120,6 +18308,7 @@ struct PreUpdate {
i64 iKey2; /* Second key value passed to hook */
Mem *aNew; /* Array of new.* values */
Table *pTab;              /* Schema object being updated */
+ Index *pPk; /* PK index if pTab is WITHOUT ROWID */
};
/*
@@ -18172,7 +18361,7 @@ SQLITE_PRIVATE void sqlite3VdbeIntegerAffinity(Mem*);
SQLITE_PRIVATE int sqlite3VdbeMemRealify(Mem*);
SQLITE_PRIVATE int sqlite3VdbeMemNumerify(Mem*);
SQLITE_PRIVATE void sqlite3VdbeMemCast(Mem*,u8,u8);
-SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(BtCursor*,u32,u32,int,Mem*);
+SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(BtCursor*,u32,u32,Mem*);
SQLITE_PRIVATE void sqlite3VdbeMemRelease(Mem *p);
SQLITE_PRIVATE int sqlite3VdbeMemFinalize(Mem*, FuncDef*);
SQLITE_PRIVATE const char *sqlite3OpcodeName(int);
@@ -18641,16 +18830,18 @@ struct tm *__cdecl localtime(const time_t *);
*/
typedef struct DateTime DateTime;
struct DateTime {
- sqlite3_int64 iJD; /* The julian day number times 86400000 */
- int Y, M, D; /* Year, month, and day */
- int h, m; /* Hour and minutes */
- int tz; /* Timezone offset in minutes */
- double s; /* Seconds */
- char validYMD; /* True (1) if Y,M,D are valid */
- char validHMS; /* True (1) if h,m,s are valid */
- char validJD; /* True (1) if iJD is valid */
- char validTZ; /* True (1) if tz is valid */
- char tzSet; /* Timezone was set explicitly */
+ sqlite3_int64 iJD; /* The julian day number times 86400000 */
+ int Y, M, D; /* Year, month, and day */
+ int h, m; /* Hour and minutes */
+ int tz; /* Timezone offset in minutes */
+ double s; /* Seconds */
+ char validJD; /* True (1) if iJD is valid */
+ char rawS; /* Raw numeric value stored in s */
+ char validYMD; /* True (1) if Y,M,D are valid */
+ char validHMS; /* True (1) if h,m,s are valid */
+ char validTZ; /* True (1) if tz is valid */
+ char tzSet; /* Timezone was set explicitly */
+ char isError; /* An overflow has occurred */
};
@@ -18798,6 +18989,7 @@ static int parseHhMmSs(const char *zDate, DateTime *p){
s = 0;
}
p->validJD = 0;
+ p->rawS = 0;
p->validHMS = 1;
p->h = h;
p->m = m;
@@ -18808,6 +19000,14 @@ static int parseHhMmSs(const char *zDate, DateTime *p){
}
/*
+** Put the DateTime object into its error state.
+*/
+static void datetimeError(DateTime *p){
+ memset(p, 0, sizeof(*p));
+ p->isError = 1;
+}
+
+/*
** Convert from YYYY-MM-DD HH:MM:SS to julian day. We always assume
** that the YYYY-MM-DD is according to the Gregorian calendar.
**
@@ -18826,6 +19026,10 @@ static void computeJD(DateTime *p){
M = 1;
D = 1;
}
+ if( Y<-4713 || Y>9999 || p->rawS ){
+ datetimeError(p);
+ return;
+ }
if( M<=2 ){
Y--;
M += 12;
@@ -18907,6 +19111,21 @@ static int setDateTimeToCurrent(sqlite3_context *context, DateTime *p){
}
/*
+** Input "r" is a numeric quantity which might be a julian day number,
+** or the number of seconds since 1970.  If the value of r is within
+** range of a julian day number, install it as such and set validJD.
+** If the value is a valid unix timestamp, put it in p->s and set p->rawS.
+*/
+static void setRawDateNumber(DateTime *p, double r){
+ p->s = r;
+ p->rawS = 1;
+ if( r>=0.0 && r<5373484.5 ){
+ p->iJD = (sqlite3_int64)(r*86400000.0 + 0.5);
+ p->validJD = 1;
+ }
+}
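A small sketch of the two paths through setRawDateNumber(), using illustrative inputs
(2451545.0 is the julian day number for 2000-01-01 12:00, and 1092941466 is the
unix-timestamp example used in the SQLite date/time documentation):

    DateTime d1, d2;
    memset(&d1, 0, sizeof(d1));
    memset(&d2, 0, sizeof(d2));
    setRawDateNumber(&d1, 2451545.0);    /* within [0, 5373484.5): rawS=1 and validJD=1 */
    setRawDateNumber(&d2, 1092941466.0); /* out of julian-day range: rawS=1 only; the value
                                         ** becomes a date only if a later "unixepoch"
                                         ** modifier converts d2.s */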
+
+/*
** Attempt to parse the given string into a julian day number. Return
** the number of errors.
**
@@ -18935,13 +19154,30 @@ static int parseDateOrTime(
}else if( sqlite3StrICmp(zDate,"now")==0){
return setDateTimeToCurrent(context, p);
}else if( sqlite3AtoF(zDate, &r, sqlite3Strlen30(zDate), SQLITE_UTF8) ){
- p->iJD = (sqlite3_int64)(r*86400000.0 + 0.5);
- p->validJD = 1;
+ setRawDateNumber(p, r);
return 0;
}
return 1;
}
+/* The julian day number for 9999-12-31 23:59:59.999 is 5373484.4999999.
+** Multiplying this by 86400000 gives 464269060799999 as the maximum value
+** for DateTime.iJD.
+**
+** But some older compilers (ex: gcc 4.2.1 on older Macs) cannot deal with
+** such a large integer literal, so we have to encode it.
+*/
+#define INT_464269060799999 ((((i64)0x1a640)<<32)|0x1072fdff)
+
+/*
+** Return TRUE if the given julian day number is within range.
+**
+** The input is the JulianDay times 86400000.
+*/
+static int validJulianDay(sqlite3_int64 iJD){
+ return iJD>=0 && iJD<=INT_464269060799999;
+}
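As a sanity check on the encoded constant above (plain arithmetic, easy to verify by hand):

    0x1a640 * 2^32  =  108096 * 4294967296  =  464268784828416
    + 0x1072fdff    =                              275971583
                    =                        464269060799999  =  5373484.5*86400000 - 1

That is one millisecond short of the iJD value for 10000-01-01 00:00:00, which matches the
9999-12-31 23:59:59.999 bound described above.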
+
/*
** Compute the Year, Month, and Day from the julian day number.
*/
@@ -18953,6 +19189,7 @@ static void computeYMD(DateTime *p){
p->M = 1;
p->D = 1;
}else{
+ assert( validJulianDay(p->iJD) );
Z = (int)((p->iJD + 43200000)/86400000);
A = (int)((Z - 1867216.25)/36524.25);
A = Z + 1 + A - (A/4);
@@ -18983,6 +19220,7 @@ static void computeHMS(DateTime *p){
s -= p->h*3600;
p->m = s/60;
p->s += s - p->m*60;
+ p->rawS = 0;
p->validHMS = 1;
}
@@ -19044,14 +19282,14 @@ static int osLocaltime(time_t *t, struct tm *pTm){
#endif
sqlite3_mutex_enter(mutex);
pX = localtime(t);
-#ifndef SQLITE_OMIT_BUILTIN_TEST
+#ifndef SQLITE_UNTESTABLE
if( sqlite3GlobalConfig.bLocaltimeFault ) pX = 0;
#endif
if( pX ) *pTm = *pX;
sqlite3_mutex_leave(mutex);
rc = pX==0;
#else
-#ifndef SQLITE_OMIT_BUILTIN_TEST
+#ifndef SQLITE_UNTESTABLE
if( sqlite3GlobalConfig.bLocaltimeFault ) return 1;
#endif
#if HAVE_LOCALTIME_R
@@ -19122,7 +19360,9 @@ static sqlite3_int64 localtimeOffset(
y.validYMD = 1;
y.validHMS = 1;
y.validJD = 0;
+ y.rawS = 0;
y.validTZ = 0;
+ y.isError = 0;
computeJD(&y);
*pRc = SQLITE_OK;
return y.iJD - x.iJD;
@@ -19130,6 +19370,29 @@ static sqlite3_int64 localtimeOffset(
#endif /* SQLITE_OMIT_LOCALTIME */
/*
+** The following table defines various date transformations of the form
+**
+** 'NNN days'
+**
+** Where NNN is an arbitrary floating-point number and "days" can be one
+** of several units of time.
+*/
+static const struct {
+ u8 eType; /* Transformation type code */
+  u8 nName;           /* Length of the name */
+ char *zName; /* Name of the transformation */
+ double rLimit; /* Maximum NNN value for this transform */
+ double rXform; /* Constant used for this transform */
+} aXformType[] = {
+ { 0, 6, "second", 464269060800.0, 86400000.0/(24.0*60.0*60.0) },
+ { 0, 6, "minute", 7737817680.0, 86400000.0/(24.0*60.0) },
+ { 0, 4, "hour", 128963628.0, 86400000.0/24.0 },
+ { 0, 3, "day", 5373485.0, 86400000.0 },
+ { 1, 5, "month", 176546.0, 30.0*86400000.0 },
+ { 2, 4, "year", 14713.0, 365.0*86400000.0 },
+};
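A worked example of how this table drives the transform loop added to parseModifier()
later in this diff, for the modifier '+3.5 hours' (the modifier string is an arbitrary
illustration): the trailing 's' is dropped, n=4 matches the "hour" row, 3.5 is well inside
the 128963628 limit, eType is 0 so neither the month nor the year special case runs, and
the net effect is

    p->iJD += (sqlite3_int64)(3.5 * (86400000.0/24.0) + 0.5);  /* 3.5 * 3600000 = 12600000 ms,
                                                                ** i.e. 3 hours 30 minutes */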
+
+/*
** Process a modifier to a date-time stamp. The modifiers are
** as follows:
**
@@ -19153,17 +19416,15 @@ static sqlite3_int64 localtimeOffset(
** to context pCtx. If the error is an unrecognized modifier, no error is
** written to pCtx.
*/
-static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){
+static int parseModifier(
+ sqlite3_context *pCtx, /* Function context */
+ const char *z, /* The text of the modifier */
+  int n,                   /* Length of z in bytes */
+ DateTime *p /* The date/time value to be modified */
+){
int rc = 1;
- int n;
double r;
- char *z, zBuf[30];
- z = zBuf;
- for(n=0; n<ArraySize(zBuf)-1 && zMod[n]; n++){
- z[n] = (char)sqlite3UpperToLower[(u8)zMod[n]];
- }
- z[n] = 0;
- switch( z[0] ){
+ switch(sqlite3UpperToLower[(u8)z[0]] ){
#ifndef SQLITE_OMIT_LOCALTIME
case 'l': {
/* localtime
@@ -19171,7 +19432,7 @@ static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){
** Assuming the current time value is UTC (a.k.a. GMT), shift it to
** show local time.
*/
- if( strcmp(z, "localtime")==0 ){
+ if( sqlite3_stricmp(z, "localtime")==0 ){
computeJD(p);
p->iJD += localtimeOffset(p, pCtx, &rc);
clearYMD_HMS_TZ(p);
@@ -19183,16 +19444,21 @@ static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){
/*
** unixepoch
**
- ** Treat the current value of p->iJD as the number of
+ ** Treat the current value of p->s as the number of
** seconds since 1970. Convert to a real julian day number.
*/
- if( strcmp(z, "unixepoch")==0 && p->validJD ){
- p->iJD = (p->iJD + 43200)/86400 + 21086676*(i64)10000000;
- clearYMD_HMS_TZ(p);
- rc = 0;
+ if( sqlite3_stricmp(z, "unixepoch")==0 && p->rawS ){
+ r = p->s*1000.0 + 210866760000000.0;
+ if( r>=0.0 && r<464269060800000.0 ){
+ clearYMD_HMS_TZ(p);
+ p->iJD = (sqlite3_int64)r;
+ p->validJD = 1;
+ p->rawS = 0;
+ rc = 0;
+ }
}
#ifndef SQLITE_OMIT_LOCALTIME
- else if( strcmp(z, "utc")==0 ){
+ else if( sqlite3_stricmp(z, "utc")==0 ){
if( p->tzSet==0 ){
sqlite3_int64 c1;
computeJD(p);
@@ -19218,7 +19484,7 @@ static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){
** weekday N where 0==Sunday, 1==Monday, and so forth. If the
** date is already on the appropriate weekday, this is a no-op.
*/
- if( strncmp(z, "weekday ", 8)==0
+ if( sqlite3_strnicmp(z, "weekday ", 8)==0
&& sqlite3AtoF(&z[8], &r, sqlite3Strlen30(&z[8]), SQLITE_UTF8)
&& (n=(int)r)==r && n>=0 && r<7 ){
sqlite3_int64 Z;
@@ -19241,7 +19507,7 @@ static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){
** Move the date backwards to the beginning of the current day,
** or month or year.
*/
- if( strncmp(z, "start of ", 9)!=0 ) break;
+ if( sqlite3_strnicmp(z, "start of ", 9)!=0 ) break;
z += 9;
computeYMD(p);
p->validHMS = 1;
@@ -19249,15 +19515,15 @@ static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){
p->s = 0.0;
p->validTZ = 0;
p->validJD = 0;
- if( strcmp(z,"month")==0 ){
+ if( sqlite3_stricmp(z,"month")==0 ){
p->D = 1;
rc = 0;
- }else if( strcmp(z,"year")==0 ){
+ }else if( sqlite3_stricmp(z,"year")==0 ){
computeYMD(p);
p->M = 1;
p->D = 1;
rc = 0;
- }else if( strcmp(z,"day")==0 ){
+ }else if( sqlite3_stricmp(z,"day")==0 ){
rc = 0;
}
break;
@@ -19275,6 +19541,7 @@ static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){
case '8':
case '9': {
double rRounder;
+ int i;
for(n=1; z[n] && z[n]!=':' && !sqlite3Isspace(z[n]); n++){}
if( !sqlite3AtoF(z, &r, n, SQLITE_UTF8) ){
rc = 1;
@@ -19303,46 +19570,48 @@ static int parseModifier(sqlite3_context *pCtx, const char *zMod, DateTime *p){
rc = 0;
break;
}
+
+ /* If control reaches this point, it means the transformation is
+ ** one of the forms like "+NNN days". */
z += n;
while( sqlite3Isspace(*z) ) z++;
n = sqlite3Strlen30(z);
if( n>10 || n<3 ) break;
- if( z[n-1]=='s' ){ z[n-1] = 0; n--; }
+ if( sqlite3UpperToLower[(u8)z[n-1]]=='s' ) n--;
computeJD(p);
- rc = 0;
+ rc = 1;
rRounder = r<0 ? -0.5 : +0.5;
- if( n==3 && strcmp(z,"day")==0 ){
- p->iJD += (sqlite3_int64)(r*86400000.0 + rRounder);
- }else if( n==4 && strcmp(z,"hour")==0 ){
- p->iJD += (sqlite3_int64)(r*(86400000.0/24.0) + rRounder);
- }else if( n==6 && strcmp(z,"minute")==0 ){
- p->iJD += (sqlite3_int64)(r*(86400000.0/(24.0*60.0)) + rRounder);
- }else if( n==6 && strcmp(z,"second")==0 ){
- p->iJD += (sqlite3_int64)(r*(86400000.0/(24.0*60.0*60.0)) + rRounder);
- }else if( n==5 && strcmp(z,"month")==0 ){
- int x, y;
- computeYMD_HMS(p);
- p->M += (int)r;
- x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12;
- p->Y += x;
- p->M -= x*12;
- p->validJD = 0;
- computeJD(p);
- y = (int)r;
- if( y!=r ){
- p->iJD += (sqlite3_int64)((r - y)*30.0*86400000.0 + rRounder);
- }
- }else if( n==4 && strcmp(z,"year")==0 ){
- int y = (int)r;
- computeYMD_HMS(p);
- p->Y += y;
- p->validJD = 0;
- computeJD(p);
- if( y!=r ){
- p->iJD += (sqlite3_int64)((r - y)*365.0*86400000.0 + rRounder);
+ for(i=0; i<ArraySize(aXformType); i++){
+ if( aXformType[i].nName==n
+ && sqlite3_strnicmp(aXformType[i].zName, z, n)==0
+ && r>-aXformType[i].rLimit && r<aXformType[i].rLimit
+ ){
+ switch( aXformType[i].eType ){
+ case 1: { /* Special processing to add months */
+ int x;
+ computeYMD_HMS(p);
+ p->M += (int)r;
+ x = p->M>0 ? (p->M-1)/12 : (p->M-12)/12;
+ p->Y += x;
+ p->M -= x*12;
+ p->validJD = 0;
+ r -= (int)r;
+ break;
+ }
+ case 2: { /* Special processing to add years */
+ int y = (int)r;
+ computeYMD_HMS(p);
+ p->Y += y;
+ p->validJD = 0;
+ r -= (int)r;
+ break;
+ }
+ }
+ computeJD(p);
+ p->iJD += (sqlite3_int64)(r*aXformType[i].rXform + rRounder);
+ rc = 0;
+ break;
}
- }else{
- rc = 1;
}
clearYMD_HMS_TZ(p);
break;
@@ -19369,7 +19638,7 @@ static int isDate(
sqlite3_value **argv,
DateTime *p
){
- int i;
+ int i, n;
const unsigned char *z;
int eType;
memset(p, 0, sizeof(*p));
@@ -19378,8 +19647,7 @@ static int isDate(
}
if( (eType = sqlite3_value_type(argv[0]))==SQLITE_FLOAT
|| eType==SQLITE_INTEGER ){
- p->iJD = (sqlite3_int64)(sqlite3_value_double(argv[0])*86400000.0 + 0.5);
- p->validJD = 1;
+ setRawDateNumber(p, sqlite3_value_double(argv[0]));
}else{
z = sqlite3_value_text(argv[0]);
if( !z || parseDateOrTime(context, (char*)z, p) ){
@@ -19388,8 +19656,11 @@ static int isDate(
}
for(i=1; i<argc; i++){
z = sqlite3_value_text(argv[i]);
- if( z==0 || parseModifier(context, (char*)z, p) ) return 1;
+ n = sqlite3_value_bytes(argv[i]);
+ if( z==0 || parseModifier(context, (char*)z, n, p) ) return 1;
}
+ computeJD(p);
+ if( p->isError || !validJulianDay(p->iJD) ) return 1;
return 0;
}
@@ -20187,7 +20458,7 @@ SQLITE_API int sqlite3_vfs_unregister(sqlite3_vfs *pVfs){
/* #include "sqliteInt.h" */
-#ifndef SQLITE_OMIT_BUILTIN_TEST
+#ifndef SQLITE_UNTESTABLE
/*
** Global variables.
@@ -20245,7 +20516,7 @@ SQLITE_PRIVATE void sqlite3EndBenignMalloc(void){
}
}
-#endif /* #ifndef SQLITE_OMIT_BUILTIN_TEST */
+#endif /* #ifndef SQLITE_UNTESTABLE */
/************** End of fault.c ***********************************************/
/************** Begin file mem0.c ********************************************/
@@ -20438,7 +20709,9 @@ static malloc_zone_t* _sqliteZone_;
*/
static void *sqlite3MemMalloc(int nByte){
#ifdef SQLITE_MALLOCSIZE
- void *p = SQLITE_MALLOC( nByte );
+ void *p;
+ testcase( ROUND8(nByte)==nByte );
+ p = SQLITE_MALLOC( nByte );
if( p==0 ){
testcase( sqlite3GlobalConfig.xLog!=0 );
sqlite3_log(SQLITE_NOMEM, "failed to allocate %u bytes of memory", nByte);
@@ -20447,7 +20720,7 @@ static void *sqlite3MemMalloc(int nByte){
#else
sqlite3_int64 *p;
assert( nByte>0 );
- nByte = ROUND8(nByte);
+ testcase( ROUND8(nByte)!=nByte );
p = SQLITE_MALLOC( nByte+8 );
if( p ){
p[0] = nByte;
@@ -23569,8 +23842,7 @@ SQLITE_PRIVATE void sqlite3MemoryBarrier(void){
SQLITE_MEMORY_BARRIER;
#elif defined(__GNUC__)
__sync_synchronize();
-#elif !defined(SQLITE_DISABLE_INTRINSIC) && \
- defined(_MSC_VER) && _MSC_VER>=1300
+#elif MSVC_VERSION>=1300
_ReadWriteBarrier();
#elif defined(MemoryBarrier)
MemoryBarrier();
@@ -24102,11 +24374,19 @@ static void sqlite3MallocAlarm(int nByte){
** Do a memory allocation with statistics and alarms. Assume the
** lock is already held.
*/
-static int mallocWithAlarm(int n, void **pp){
- int nFull;
+static void mallocWithAlarm(int n, void **pp){
void *p;
+ int nFull;
assert( sqlite3_mutex_held(mem0.mutex) );
+ assert( n>0 );
+
+ /* In Firefox (circa 2017-02-08), xRoundup() is remapped to an internal
+ ** implementation of malloc_good_size(), which must be called in debug
+ ** mode and specifically when the DMD "Dark Matter Detector" is enabled
+ ** or else a crash results. Hence, do not attempt to optimize out the
+ ** following xRoundup() call. */
nFull = sqlite3GlobalConfig.m.xRoundup(n);
+
sqlite3StatusHighwater(SQLITE_STATUS_MALLOC_SIZE, n);
if( mem0.alarmThreshold>0 ){
sqlite3_int64 nUsed = sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED);
@@ -24130,7 +24410,6 @@ static int mallocWithAlarm(int n, void **pp){
sqlite3StatusUp(SQLITE_STATUS_MALLOC_COUNT, 1);
}
*pp = p;
- return nFull;
}
/*
@@ -24404,7 +24683,7 @@ SQLITE_PRIVATE void *sqlite3Realloc(void *pOld, u64 nBytes){
sqlite3_mutex_enter(mem0.mutex);
sqlite3StatusHighwater(SQLITE_STATUS_MALLOC_SIZE, (int)nBytes);
nDiff = nNew - nOld;
- if( sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED) >=
+ if( nDiff>0 && sqlite3StatusValue(SQLITE_STATUS_MEMORY_USED) >=
mem0.alarmThreshold-nDiff ){
sqlite3MallocAlarm(nDiff);
}
@@ -24611,9 +24890,8 @@ SQLITE_PRIVATE char *sqlite3DbStrDup(sqlite3 *db, const char *z){
if( z==0 ){
return 0;
}
- n = sqlite3Strlen30(z) + 1;
- assert( (n&0x7fffffff)==n );
- zNew = sqlite3DbMallocRaw(db, (int)n);
+ n = strlen(z) + 1;
+ zNew = sqlite3DbMallocRaw(db, n);
if( zNew ){
memcpy(zNew, z, n);
}
@@ -24771,7 +25049,6 @@ typedef struct et_info { /* Information about each format field */
** Allowed values for et_info.flags
*/
#define FLAG_SIGNED 1 /* True if the value to convert is signed */
-#define FLAG_INTERN 2 /* True if for internal use only */
#define FLAG_STRING 4 /* Allow infinity precision */
@@ -24805,11 +25082,10 @@ static const et_info fmtinfo[] = {
{ '%', 0, 0, etPERCENT, 0, 0 },
{ 'p', 16, 0, etPOINTER, 0, 1 },
-/* All the rest have the FLAG_INTERN bit set and are thus for internal
-** use only */
- { 'T', 0, 2, etTOKEN, 0, 0 },
- { 'S', 0, 2, etSRCLIST, 0, 0 },
- { 'r', 10, 3, etORDINAL, 0, 0 },
+ /* All the rest are undocumented and are for internal use only */
+ { 'T', 0, 0, etTOKEN, 0, 0 },
+ { 'S', 0, 0, etSRCLIST, 0, 0 },
+ { 'r', 10, 1, etORDINAL, 0, 0 },
};
/*
@@ -24903,7 +25179,6 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
etByte done; /* Loop termination flag */
etByte xtype = etINVALID; /* Conversion paradigm */
u8 bArgList; /* True for SQLITE_PRINTF_SQLFUNC */
- u8 useIntern; /* Ok to use internal conversions (ex: %T) */
char prefix; /* Prefix character. "+" or "-" or " " or '\0'. */
sqlite_uint64 longvalue; /* Value for integer types */
LONGDOUBLE_TYPE realvalue; /* Value for real types */
@@ -24922,13 +25197,11 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
char buf[etBUFSIZE]; /* Conversion buffer */
bufpt = 0;
- if( pAccum->printfFlags ){
- if( (bArgList = (pAccum->printfFlags & SQLITE_PRINTF_SQLFUNC))!=0 ){
- pArgList = va_arg(ap, PrintfArguments*);
- }
- useIntern = pAccum->printfFlags & SQLITE_PRINTF_INTERNAL;
+ if( (pAccum->printfFlags & SQLITE_PRINTF_SQLFUNC)!=0 ){
+ pArgList = va_arg(ap, PrintfArguments*);
+ bArgList = 1;
}else{
- bArgList = useIntern = 0;
+ bArgList = 0;
}
for(; (c=(*fmt))!=0; ++fmt){
if( c!='%' ){
@@ -25040,11 +25313,7 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
for(idx=0; idx<ArraySize(fmtinfo); idx++){
if( c==fmtinfo[idx].fmttype ){
infop = &fmtinfo[idx];
- if( useIntern || (infop->flags & FLAG_INTERN)==0 ){
- xtype = infop->type;
- }else{
- return;
- }
+ xtype = infop->type;
break;
}
}
@@ -25413,7 +25682,9 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
break;
}
case etTOKEN: {
- Token *pToken = va_arg(ap, Token*);
+ Token *pToken;
+ if( (pAccum->printfFlags & SQLITE_PRINTF_INTERNAL)==0 ) return;
+ pToken = va_arg(ap, Token*);
assert( bArgList==0 );
if( pToken && pToken->n ){
sqlite3StrAccumAppend(pAccum, (const char*)pToken->z, pToken->n);
@@ -25422,9 +25693,13 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
break;
}
case etSRCLIST: {
- SrcList *pSrc = va_arg(ap, SrcList*);
- int k = va_arg(ap, int);
- struct SrcList_item *pItem = &pSrc->a[k];
+ SrcList *pSrc;
+ int k;
+ struct SrcList_item *pItem;
+ if( (pAccum->printfFlags & SQLITE_PRINTF_INTERNAL)==0 ) return;
+ pSrc = va_arg(ap, SrcList*);
+ k = va_arg(ap, int);
+ pItem = &pSrc->a[k];
assert( bArgList==0 );
assert( k>=0 && k<pSrc->nSrc );
if( pItem->zDatabase ){
@@ -25446,9 +25721,13 @@ SQLITE_PRIVATE void sqlite3VXPrintf(
** the output.
*/
width -= length;
- if( width>0 && !flag_leftjustify ) sqlite3AppendChar(pAccum, width, ' ');
- sqlite3StrAccumAppend(pAccum, bufpt, length);
- if( width>0 && flag_leftjustify ) sqlite3AppendChar(pAccum, width, ' ');
+ if( width>0 ){
+ if( !flag_leftjustify ) sqlite3AppendChar(pAccum, width, ' ');
+ sqlite3StrAccumAppend(pAccum, bufpt, length);
+ if( flag_leftjustify ) sqlite3AppendChar(pAccum, width, ' ');
+ }else{
+ sqlite3StrAccumAppend(pAccum, bufpt, length);
+ }
if( zExtra ){
sqlite3DbFree(pAccum->db, zExtra);
@@ -25553,7 +25832,7 @@ SQLITE_PRIVATE void sqlite3StrAccumAppend(StrAccum *p, const char *z, int N){
assert( p->accError==0 || p->nAlloc==0 );
if( p->nChar+N >= p->nAlloc ){
enlargeAndAppend(p,z,N);
- }else{
+ }else if( N ){
assert( p->zText );
p->nChar += N;
memcpy(&p->zText[p->nChar-N], z, N);
@@ -25573,18 +25852,23 @@ SQLITE_PRIVATE void sqlite3StrAccumAppendAll(StrAccum *p, const char *z){
** Return a pointer to the resulting string. Return a NULL
** pointer if any kind of error was encountered.
*/
+static SQLITE_NOINLINE char *strAccumFinishRealloc(StrAccum *p){
+ assert( p->mxAlloc>0 && !isMalloced(p) );
+ p->zText = sqlite3DbMallocRaw(p->db, p->nChar+1 );
+ if( p->zText ){
+ memcpy(p->zText, p->zBase, p->nChar+1);
+ p->printfFlags |= SQLITE_PRINTF_MALLOCED;
+ }else{
+ setStrAccumError(p, STRACCUM_NOMEM);
+ }
+ return p->zText;
+}
SQLITE_PRIVATE char *sqlite3StrAccumFinish(StrAccum *p){
if( p->zText ){
assert( (p->zText==p->zBase)==!isMalloced(p) );
p->zText[p->nChar] = 0;
if( p->mxAlloc>0 && !isMalloced(p) ){
- p->zText = sqlite3DbMallocRaw(p->db, p->nChar+1 );
- if( p->zText ){
- memcpy(p->zText, p->zBase, p->nChar+1);
- p->printfFlags |= SQLITE_PRINTF_MALLOCED;
- }else{
- setStrAccumError(p, STRACCUM_NOMEM);
- }
+ return strAccumFinishRealloc(p);
}
}
return p->zText;
@@ -25724,7 +26008,8 @@ SQLITE_API char *sqlite3_vsnprintf(int n, char *zBuf, const char *zFormat, va_li
#endif
sqlite3StrAccumInit(&acc, 0, zBuf, n, 0);
sqlite3VXPrintf(&acc, zFormat, ap);
- return sqlite3StrAccumFinish(&acc);
+ zBuf[acc.nChar] = 0;
+ return zBuf;
}
SQLITE_API char *sqlite3_snprintf(int n, char *zBuf, const char *zFormat, ...){
char *z;
@@ -25872,6 +26157,7 @@ static void sqlite3TreeViewLine(TreeView *p, const char *zFormat, ...){
va_start(ap, zFormat);
sqlite3VXPrintf(&acc, zFormat, ap);
va_end(ap);
+ assert( acc.nChar>0 );
if( zBuf[acc.nChar-1]!='\n' ) sqlite3StrAccumAppend(&acc, "\n", 1);
sqlite3StrAccumFinish(&acc);
fprintf(stdout,"%s", zBuf);
@@ -26432,7 +26718,7 @@ SQLITE_API void sqlite3_randomness(int N, void *pBuf){
sqlite3_mutex_leave(mutex);
}
-#ifndef SQLITE_OMIT_BUILTIN_TEST
+#ifndef SQLITE_UNTESTABLE
/*
** For testing purposes, we sometimes want to preserve the state of
** PRNG and restore the PRNG to its saved state at a later time, or
@@ -26457,7 +26743,7 @@ SQLITE_PRIVATE void sqlite3PrngRestoreState(void){
sizeof(sqlite3Prng)
);
}
-#endif /* SQLITE_OMIT_BUILTIN_TEST */
+#endif /* SQLITE_UNTESTABLE */
/************** End of random.c **********************************************/
/************** Begin file threads.c *****************************************/
@@ -27315,7 +27601,7 @@ SQLITE_PRIVATE void sqlite3Coverage(int x){
** Return whatever integer value the test callback returns, or return
** SQLITE_OK if no test callback is installed.
*/
-#ifndef SQLITE_OMIT_BUILTIN_TEST
+#ifndef SQLITE_UNTESTABLE
SQLITE_PRIVATE int sqlite3FaultSim(int iTest){
int (*xCallback)(int) = sqlite3GlobalConfig.xTestCallback;
return xCallback ? xCallback(iTest) : SQLITE_OK;
@@ -28413,13 +28699,11 @@ SQLITE_PRIVATE u32 sqlite3Get4byte(const u8 *p){
u32 x;
memcpy(&x,p,4);
return x;
-#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \
- && defined(__GNUC__) && GCC_VERSION>=4003000
+#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000)
u32 x;
memcpy(&x,p,4);
return __builtin_bswap32(x);
-#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \
- && defined(_MSC_VER) && _MSC_VER>=1300
+#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
u32 x;
memcpy(&x,p,4);
return _byteswap_ulong(x);
@@ -28431,12 +28715,10 @@ SQLITE_PRIVATE u32 sqlite3Get4byte(const u8 *p){
SQLITE_PRIVATE void sqlite3Put4byte(unsigned char *p, u32 v){
#if SQLITE_BYTEORDER==4321
memcpy(p,&v,4);
-#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \
- && defined(__GNUC__) && GCC_VERSION>=4003000
+#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000)
u32 x = __builtin_bswap32(v);
memcpy(p,&x,4);
-#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \
- && defined(_MSC_VER) && _MSC_VER>=1300
+#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
u32 x = _byteswap_ulong(v);
memcpy(p,&x,4);
#else
@@ -28552,6 +28834,9 @@ SQLITE_PRIVATE int sqlite3SafetyCheckSickOrOk(sqlite3 *db){
** overflow, leave *pA unchanged and return 1.
*/
SQLITE_PRIVATE int sqlite3AddInt64(i64 *pA, i64 iB){
+#if GCC_VERSION>=5004000 || CLANG_VERSION>=4000000
+ return __builtin_add_overflow(*pA, iB, pA);
+#else
i64 iA = *pA;
testcase( iA==0 ); testcase( iA==1 );
testcase( iB==-1 ); testcase( iB==0 );
@@ -28566,8 +28851,12 @@ SQLITE_PRIVATE int sqlite3AddInt64(i64 *pA, i64 iB){
}
*pA += iB;
return 0;
+#endif
}
SQLITE_PRIVATE int sqlite3SubInt64(i64 *pA, i64 iB){
+#if GCC_VERSION>=5004000 || CLANG_VERSION>=4000000
+ return __builtin_sub_overflow(*pA, iB, pA);
+#else
testcase( iB==SMALLEST_INT64+1 );
if( iB==SMALLEST_INT64 ){
testcase( (*pA)==(-1) ); testcase( (*pA)==0 );
@@ -28577,8 +28866,12 @@ SQLITE_PRIVATE int sqlite3SubInt64(i64 *pA, i64 iB){
}else{
return sqlite3AddInt64(pA, -iB);
}
+#endif
}
SQLITE_PRIVATE int sqlite3MulInt64(i64 *pA, i64 iB){
+#if GCC_VERSION>=5004000 || CLANG_VERSION>=4000000
+ return __builtin_mul_overflow(*pA, iB, pA);
+#else
i64 iA = *pA;
if( iB>0 ){
if( iA>LARGEST_INT64/iB ) return 1;
@@ -28594,6 +28887,7 @@ SQLITE_PRIVATE int sqlite3MulInt64(i64 *pA, i64 iB){
}
*pA = iA*iB;
return 0;
+#endif
}
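For readers unfamiliar with the intrinsics behind the new fast paths above (available on
GCC 5+ and clang 4+, per the version guards): each __builtin_*_overflow() call computes the
operation in infinite precision, stores the truncated result through its pointer argument,
and returns nonzero exactly when truncation occurred. A minimal sketch:

    i64 a = LARGEST_INT64;
    if( __builtin_add_overflow(a, (i64)1, &a) ){
      /* overflow reported; a now holds the wrapped two's-complement sum, which
      ** callers presumably ignore whenever these helpers return 1 */
    }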
/*
@@ -28727,6 +29021,109 @@ SQLITE_PRIVATE u64 sqlite3LogEstToInt(LogEst x){
}
#endif /* defined SCANSTAT or STAT4 or ESTIMATED_ROWS */
+/*
+** Add a new name/number pair to a VList. This might require that the
+** VList object be reallocated, so return the new VList. If an OOM
+** error occurs, the original VList is returned and the
+** db->mallocFailed flag is set.
+**
+** A VList is really just an array of integers. To destroy a VList,
+** simply pass it to sqlite3DbFree().
+**
+** The first integer is the number of integers allocated for the whole
+** VList. The second integer is the number of integers actually used.
+** Each name/number pair is encoded by subsequent groups of 3 or more
+** integers.
+**
+** Each name/number pair starts with two integers which are the numeric
+** value for the pair and the size of the name/number pair, respectively.
+** The text name overlays one or more following integers. The text name
+** is always zero-terminated.
+**
+** Conceptually:
+**
+** struct VList {
+** int nAlloc; // Number of allocated slots
+** int nUsed; // Number of used slots
+** struct VListEntry {
+** int iValue; // Value for this entry
+** int nSlot; // Slots used by this entry
+** // ... variable name goes here
+** } a[0];
+** }
+**
+** During code generation, pointers to the variable names within the
+** VList are taken. When that happens, nAlloc is set to zero as an
+** indication that the VList may never again be enlarged, since the
+** accompanying realloc() would invalidate the pointers.
+*/
+SQLITE_PRIVATE VList *sqlite3VListAdd(
+ sqlite3 *db, /* The database connection used for malloc() */
+ VList *pIn, /* The input VList. Might be NULL */
+ const char *zName, /* Name of symbol to add */
+ int nName, /* Bytes of text in zName */
+ int iVal /* Value to associate with zName */
+){
+ int nInt; /* number of sizeof(int) objects needed for zName */
+ char *z; /* Pointer to where zName will be stored */
+ int i; /* Index in pIn[] where zName is stored */
+
+ nInt = nName/4 + 3;
+ assert( pIn==0 || pIn[0]>=3 ); /* Verify ok to add new elements */
+ if( pIn==0 || pIn[1]+nInt > pIn[0] ){
+ /* Enlarge the allocation */
+ int nAlloc = (pIn ? pIn[0]*2 : 10) + nInt;
+ VList *pOut = sqlite3DbRealloc(db, pIn, nAlloc*sizeof(int));
+ if( pOut==0 ) return pIn;
+ if( pIn==0 ) pOut[1] = 2;
+ pIn = pOut;
+ pIn[0] = nAlloc;
+ }
+ i = pIn[1];
+ pIn[i] = iVal;
+ pIn[i+1] = nInt;
+ z = (char*)&pIn[i+2];
+ pIn[1] = i+nInt;
+ assert( pIn[1]<=pIn[0] );
+ memcpy(z, zName, nName);
+ z[nName] = 0;
+ return pIn;
+}
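A worked example of the resulting layout (the name ':abc' and value 1 are arbitrary
illustrations): adding ':abc' (nName=4) to a NULL list gives nInt = 4/4+3 = 4 and a fresh
allocation of 10+4 = 14 ints, laid out as

    pIn[0] = 14      /* nAlloc: total slots allocated                      */
    pIn[1] = 6       /* nUsed:  2 header ints + the 4 ints of this entry   */
    pIn[2] = 1       /* iValue: the variable number                        */
    pIn[3] = 4       /* nSlot:  ints consumed by this entry                */
    pIn[4..5]        /* ":abc" plus its terminating zero, overlaid as text */

With that in place, sqlite3VListNumToName(pIn, 1) below returns a pointer to the ':abc'
text and sqlite3VListNameToNum(pIn, ":abc", 4) returns 1.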
+
+/*
+** Return a pointer to the name of a variable in the given VList that
+** has the value iVal, or return NULL if there is no such variable in
+** the list.
+*/
+SQLITE_PRIVATE const char *sqlite3VListNumToName(VList *pIn, int iVal){
+ int i, mx;
+ if( pIn==0 ) return 0;
+ mx = pIn[1];
+ i = 2;
+ do{
+ if( pIn[i]==iVal ) return (char*)&pIn[i+2];
+ i += pIn[i+1];
+ }while( i<mx );
+ return 0;
+}
+
+/*
+** Return the number of the variable named zName, if it is in the VList,
+** or return 0 if there is no such variable.
+*/
+SQLITE_PRIVATE int sqlite3VListNameToNum(VList *pIn, const char *zName, int nName){
+ int i, mx;
+ if( pIn==0 ) return 0;
+ mx = pIn[1];
+ i = 2;
+ do{
+ const char *z = (const char*)&pIn[i+2];
+ if( strncmp(z,zName,nName)==0 && z[nName]==0 ) return pIn[i];
+ i += pIn[i+1];
+ }while( i<mx );
+ return 0;
+}
+
/************** End of util.c ************************************************/
/************** Begin file hash.c ********************************************/
/*
@@ -29082,7 +29479,7 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
/* 64 */ "Program" OpHelp(""),
/* 65 */ "FkIfZero" OpHelp("if fkctr[P1]==0 goto P2"),
/* 66 */ "IfPos" OpHelp("if r[P1]>0 then r[P1]-=P3, goto P2"),
- /* 67 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]-=P3, goto P2"),
+ /* 67 */ "IfNotZero" OpHelp("if r[P1]!=0 then r[P1]--, goto P2"),
/* 68 */ "DecrJumpZero" OpHelp("if (--r[P1])==0 goto P2"),
/* 69 */ "IncrVacuum" OpHelp(""),
/* 70 */ "VNext" OpHelp(""),
@@ -29136,48 +29533,47 @@ SQLITE_PRIVATE const char *sqlite3OpcodeName(int i){
/* 118 */ "ResetCount" OpHelp(""),
/* 119 */ "SorterCompare" OpHelp("if key(P1)!=trim(r[P3],P4) goto P2"),
/* 120 */ "SorterData" OpHelp("r[P2]=data"),
- /* 121 */ "RowKey" OpHelp("r[P2]=key"),
- /* 122 */ "RowData" OpHelp("r[P2]=data"),
- /* 123 */ "Rowid" OpHelp("r[P2]=rowid"),
- /* 124 */ "NullRow" OpHelp(""),
- /* 125 */ "SorterInsert" OpHelp(""),
- /* 126 */ "IdxInsert" OpHelp("key=r[P2]"),
- /* 127 */ "IdxDelete" OpHelp("key=r[P2@P3]"),
- /* 128 */ "Seek" OpHelp("Move P3 to P1.rowid"),
- /* 129 */ "IdxRowid" OpHelp("r[P2]=rowid"),
- /* 130 */ "Destroy" OpHelp(""),
- /* 131 */ "Clear" OpHelp(""),
+ /* 121 */ "RowData" OpHelp("r[P2]=data"),
+ /* 122 */ "Rowid" OpHelp("r[P2]=rowid"),
+ /* 123 */ "NullRow" OpHelp(""),
+ /* 124 */ "SorterInsert" OpHelp("key=r[P2]"),
+ /* 125 */ "IdxInsert" OpHelp("key=r[P2]"),
+ /* 126 */ "IdxDelete" OpHelp("key=r[P2@P3]"),
+ /* 127 */ "Seek" OpHelp("Move P3 to P1.rowid"),
+ /* 128 */ "IdxRowid" OpHelp("r[P2]=rowid"),
+ /* 129 */ "Destroy" OpHelp(""),
+ /* 130 */ "Clear" OpHelp(""),
+ /* 131 */ "ResetSorter" OpHelp(""),
/* 132 */ "Real" OpHelp("r[P2]=P4"),
- /* 133 */ "ResetSorter" OpHelp(""),
- /* 134 */ "CreateIndex" OpHelp("r[P2]=root iDb=P1"),
- /* 135 */ "CreateTable" OpHelp("r[P2]=root iDb=P1"),
- /* 136 */ "ParseSchema" OpHelp(""),
- /* 137 */ "LoadAnalysis" OpHelp(""),
- /* 138 */ "DropTable" OpHelp(""),
- /* 139 */ "DropIndex" OpHelp(""),
- /* 140 */ "DropTrigger" OpHelp(""),
- /* 141 */ "IntegrityCk" OpHelp(""),
- /* 142 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"),
- /* 143 */ "Param" OpHelp(""),
- /* 144 */ "FkCounter" OpHelp("fkctr[P1]+=P2"),
- /* 145 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"),
- /* 146 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"),
- /* 147 */ "AggStep0" OpHelp("accum=r[P3] step(r[P2@P5])"),
- /* 148 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"),
- /* 149 */ "AggFinal" OpHelp("accum=r[P1] N=P2"),
- /* 150 */ "Expire" OpHelp(""),
- /* 151 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"),
- /* 152 */ "VBegin" OpHelp(""),
- /* 153 */ "VCreate" OpHelp(""),
- /* 154 */ "VDestroy" OpHelp(""),
- /* 155 */ "VOpen" OpHelp(""),
- /* 156 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
- /* 157 */ "VRename" OpHelp(""),
- /* 158 */ "Pagecount" OpHelp(""),
- /* 159 */ "MaxPgcnt" OpHelp(""),
- /* 160 */ "CursorHint" OpHelp(""),
- /* 161 */ "Noop" OpHelp(""),
- /* 162 */ "Explain" OpHelp(""),
+ /* 133 */ "CreateIndex" OpHelp("r[P2]=root iDb=P1"),
+ /* 134 */ "CreateTable" OpHelp("r[P2]=root iDb=P1"),
+ /* 135 */ "ParseSchema" OpHelp(""),
+ /* 136 */ "LoadAnalysis" OpHelp(""),
+ /* 137 */ "DropTable" OpHelp(""),
+ /* 138 */ "DropIndex" OpHelp(""),
+ /* 139 */ "DropTrigger" OpHelp(""),
+ /* 140 */ "IntegrityCk" OpHelp(""),
+ /* 141 */ "RowSetAdd" OpHelp("rowset(P1)=r[P2]"),
+ /* 142 */ "Param" OpHelp(""),
+ /* 143 */ "FkCounter" OpHelp("fkctr[P1]+=P2"),
+ /* 144 */ "MemMax" OpHelp("r[P1]=max(r[P1],r[P2])"),
+ /* 145 */ "OffsetLimit" OpHelp("if r[P1]>0 then r[P2]=r[P1]+max(0,r[P3]) else r[P2]=(-1)"),
+ /* 146 */ "AggStep0" OpHelp("accum=r[P3] step(r[P2@P5])"),
+ /* 147 */ "AggStep" OpHelp("accum=r[P3] step(r[P2@P5])"),
+ /* 148 */ "AggFinal" OpHelp("accum=r[P1] N=P2"),
+ /* 149 */ "Expire" OpHelp(""),
+ /* 150 */ "TableLock" OpHelp("iDb=P1 root=P2 write=P3"),
+ /* 151 */ "VBegin" OpHelp(""),
+ /* 152 */ "VCreate" OpHelp(""),
+ /* 153 */ "VDestroy" OpHelp(""),
+ /* 154 */ "VOpen" OpHelp(""),
+ /* 155 */ "VColumn" OpHelp("r[P3]=vcolumn(P2)"),
+ /* 156 */ "VRename" OpHelp(""),
+ /* 157 */ "Pagecount" OpHelp(""),
+ /* 158 */ "MaxPgcnt" OpHelp(""),
+ /* 159 */ "CursorHint" OpHelp(""),
+ /* 160 */ "Noop" OpHelp(""),
+ /* 161 */ "Explain" OpHelp(""),
};
return azName[i];
}
@@ -30446,7 +30842,14 @@ struct unixFileId {
#if OS_VXWORKS
struct vxworksFileId *pId; /* Unique file ID for vxworks. */
#else
- ino_t ino; /* Inode number */
+ /* We are told that some versions of Android contain a bug that
+ ** sizes ino_t at only 32-bits instead of 64-bits. (See
+ ** https://android-review.googlesource.com/#/c/115351/3/dist/sqlite3.c)
+ ** To work around this, always allocate 64-bits for the inode number.
+ ** On small machines that only have 32-bit inodes, this wastes 4 bytes,
+ ** but that should not be a big deal. */
+ /* WAS: ino_t ino; */
+ u64 ino; /* Inode number */
#endif
};
@@ -30691,7 +31094,7 @@ static int findInodeInfo(
#if OS_VXWORKS
fileId.pId = pFile->pId;
#else
- fileId.ino = statbuf.st_ino;
+ fileId.ino = (u64)statbuf.st_ino;
#endif
pInode = inodeList;
while( pInode && memcmp(&fileId, &pInode->fileId, sizeof(fileId)) ){
@@ -30725,7 +31128,8 @@ static int fileHasMoved(unixFile *pFile){
#else
struct stat buf;
return pFile->pInode!=0 &&
- (osStat(pFile->zPath, &buf)!=0 || buf.st_ino!=pFile->pInode->fileId.ino);
+ (osStat(pFile->zPath, &buf)!=0
+ || (u64)buf.st_ino!=pFile->pInode->fileId.ino);
#endif
}
@@ -34897,7 +35301,7 @@ static UnixUnusedFd *findReusableFd(const char *zPath, int flags){
unixEnterMutex();
pInode = inodeList;
while( pInode && (pInode->fileId.dev!=sStat.st_dev
- || pInode->fileId.ino!=sStat.st_ino) ){
+ || pInode->fileId.ino!=(u64)sStat.st_ino) ){
pInode = pInode->pNext;
}
if( pInode ){
@@ -43458,7 +43862,7 @@ SQLITE_PRIVATE u32 sqlite3BitvecSize(Bitvec *p){
return p->iSize;
}
-#ifndef SQLITE_OMIT_BUILTIN_TEST
+#ifndef SQLITE_UNTESTABLE
/*
** Let V[] be an array of unsigned characters sufficient to hold
** up to N bits. Let I be an integer between 0 and N. 0<=I<N.
@@ -43573,7 +43977,7 @@ bitvec_end:
sqlite3BitvecDestroy(pBitvec);
return rc;
}
-#endif /* SQLITE_OMIT_BUILTIN_TEST */
+#endif /* SQLITE_UNTESTABLE */
/************** End of bitvec.c **********************************************/
/************** Begin file pcache.c ******************************************/
@@ -43687,7 +44091,7 @@ struct PCache {
SQLITE_PRIVATE int sqlite3PcachePageSanity(PgHdr *pPg){
PCache *pCache;
assert( pPg!=0 );
- assert( pPg->pgno>0 ); /* Page number is 1 or more */
+ assert( pPg->pgno>0 || pPg->pPager==0 ); /* Page number is 1 or more */
pCache = pPg->pCache;
assert( pCache!=0 ); /* Every page has an associated PCache */
if( pPg->flags & PGHDR_CLEAN ){
@@ -43863,6 +44267,12 @@ SQLITE_PRIVATE int sqlite3PcacheSize(void){ return sizeof(PCache); }
** has already been allocated and is passed in as the p pointer.
** The caller discovers how much space needs to be allocated by
** calling sqlite3PcacheSize().
+**
+** szExtra is some extra space allocated for each page. The first
+** 8 bytes of the extra space will be zeroed as the page is allocated,
+** but remaining content will be uninitialized. Though it is opaque
+** to this module, the extra space really ends up being the MemPage
+** structure in the pager.
*/
SQLITE_PRIVATE int sqlite3PcacheOpen(
int szPage, /* Size of every page */
@@ -43875,6 +44285,7 @@ SQLITE_PRIVATE int sqlite3PcacheOpen(
memset(p, 0, sizeof(PCache));
p->szPage = 1;
p->szExtra = szExtra;
+ assert( szExtra>=8 ); /* First 8 bytes will be zeroed */
p->bPurgeable = bPurgeable;
p->eCreate = 2;
p->xStress = xStress;
@@ -43944,7 +44355,6 @@ SQLITE_PRIVATE sqlite3_pcache_page *sqlite3PcacheFetch(
assert( pCache!=0 );
assert( pCache->pCache!=0 );
assert( createFlag==3 || createFlag==0 );
- assert( pgno>0 );
assert( pCache->eCreate==((pCache->bPurgeable && pCache->pDirty) ? 1 : 2) );
/* eCreate defines what to do if the page does not exist.
@@ -44044,7 +44454,7 @@ static SQLITE_NOINLINE PgHdr *pcacheFetchFinishWithInit(
pPgHdr->pPage = pPage;
pPgHdr->pData = pPage->pBuf;
pPgHdr->pExtra = (void *)&pPgHdr[1];
- memset(pPgHdr->pExtra, 0, pCache->szExtra);
+ memset(pPgHdr->pExtra, 0, 8);
pPgHdr->pCache = pCache;
pPgHdr->pgno = pgno;
pPgHdr->flags = PGHDR_CLEAN;
@@ -46268,7 +46678,7 @@ SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64
#ifdef SQLITE_OMIT_WAL
# define sqlite3WalOpen(x,y,z) 0
# define sqlite3WalLimit(x,y)
-# define sqlite3WalClose(w,x,y,z) 0
+# define sqlite3WalClose(v,w,x,y,z) 0
# define sqlite3WalBeginReadTransaction(y,z) 0
# define sqlite3WalEndReadTransaction(z)
# define sqlite3WalDbsize(y) 0
@@ -46278,7 +46688,7 @@ SQLITE_PRIVATE int sqlite3RowSetTest(RowSet *pRowSet, int iBatch, sqlite3_int64
# define sqlite3WalSavepoint(y,z)
# define sqlite3WalSavepointUndo(y,z) 0
# define sqlite3WalFrames(u,v,w,x,y,z) 0
-# define sqlite3WalCheckpoint(r,s,t,u,v,w,x,y,z) 0
+# define sqlite3WalCheckpoint(q,r,s,t,u,v,w,x,y,z) 0
# define sqlite3WalCallback(z) 0
# define sqlite3WalExclusiveMode(y,z) 0
# define sqlite3WalHeapMemory(z) 0
@@ -46296,7 +46706,7 @@ typedef struct Wal Wal;
/* Open and close a connection to a write-ahead log. */
SQLITE_PRIVATE int sqlite3WalOpen(sqlite3_vfs*, sqlite3_file*, const char *, int, i64, Wal**);
-SQLITE_PRIVATE int sqlite3WalClose(Wal *pWal, int sync_flags, int, u8 *);
+SQLITE_PRIVATE int sqlite3WalClose(Wal *pWal, sqlite3*, int sync_flags, int, u8 *);
/* Set the limiting size of a WAL file. */
SQLITE_PRIVATE void sqlite3WalLimit(Wal*, i64);
@@ -46339,6 +46749,7 @@ SQLITE_PRIVATE int sqlite3WalFrames(Wal *pWal, int, PgHdr *, Pgno, int, int);
/* Copy pages from the log to the database file */
SQLITE_PRIVATE int sqlite3WalCheckpoint(
Wal *pWal, /* Write-ahead log connection */
+ sqlite3 *db, /* Check this handle's interrupt flag */
int eMode, /* One of PASSIVE, FULL and RESTART */
int (*xBusy)(void*), /* Function to call when busy */
void *pBusyArg, /* Context argument for xBusyHandler */
@@ -46370,6 +46781,7 @@ SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal);
#ifdef SQLITE_ENABLE_SNAPSHOT
SQLITE_PRIVATE int sqlite3WalSnapshotGet(Wal *pWal, sqlite3_snapshot **ppSnapshot);
SQLITE_PRIVATE void sqlite3WalSnapshotOpen(Wal *pWal, sqlite3_snapshot *pSnapshot);
+SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal);
#endif
#ifdef SQLITE_ENABLE_ZIPVFS
@@ -47059,6 +47471,7 @@ struct Pager {
int nRead; /* Database pages read */
#endif
void (*xReiniter)(DbPage*); /* Call this routine when reloading pages */
+  int (*xGet)(Pager*,Pgno,DbPage**,int); /* Routine to fetch a page */
#ifdef SQLITE_HAS_CODEC
void *(*xCodec)(void*,void*,Pgno,int); /* Routine for en/decoding data */
void (*xCodecSizeChng)(void*,int,int); /* Notify of page size changes */
@@ -47179,14 +47592,20 @@ static const unsigned char aJournalMagic[] = {
#define isOpen(pFd) ((pFd)->pMethods!=0)
/*
-** Return true if this pager uses a write-ahead log instead of the usual
-** rollback journal. Otherwise false.
+** Return true if this pager uses a write-ahead log to read page pgno.
+** Return false if the pager reads pgno directly from the database.
*/
-#ifndef SQLITE_OMIT_WAL
-SQLITE_PRIVATE int sqlite3PagerUseWal(Pager *pPager){
- return (pPager->pWal!=0);
+#if !defined(SQLITE_OMIT_WAL) && defined(SQLITE_DIRECT_OVERFLOW_READ)
+SQLITE_PRIVATE int sqlite3PagerUseWal(Pager *pPager, Pgno pgno){
+ u32 iRead = 0;
+ int rc;
+ if( pPager->pWal==0 ) return 0;
+ rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iRead);
+ return rc || iRead;
}
-# define pagerUseWal(x) sqlite3PagerUseWal(x)
+#endif
+#ifndef SQLITE_OMIT_WAL
+# define pagerUseWal(x) ((x)->pWal!=0)
#else
# define pagerUseWal(x) 0
# define pagerRollbackWal(x) 0
@@ -47385,6 +47804,33 @@ static char *print_pager_state(Pager *p){
}
#endif
+/* Forward references to the various page getters */
+static int getPageNormal(Pager*,Pgno,DbPage**,int);
+static int getPageError(Pager*,Pgno,DbPage**,int);
+#if SQLITE_MAX_MMAP_SIZE>0
+static int getPageMMap(Pager*,Pgno,DbPage**,int);
+#endif
+
+/*
+** Set the Pager.xGet method for the appropriate routine used to fetch
+** content from the pager.
+*/
+static void setGetterMethod(Pager *pPager){
+ if( pPager->errCode ){
+ pPager->xGet = getPageError;
+#if SQLITE_MAX_MMAP_SIZE>0
+ }else if( USEFETCH(pPager)
+#ifdef SQLITE_HAS_CODEC
+ && pPager->xCodec==0
+#endif
+ ){
+ pPager->xGet = getPageMMap;
+#endif /* SQLITE_MAX_MMAP_SIZE>0 */
+ }else{
+ pPager->xGet = getPageNormal;
+ }
+}
+
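/* Illustrative sketch (not part of this diff or of SQLite): the dispatch
** pattern that setGetterMethod() and Pager.xGet introduce, reduced to a
** standalone toy. A function pointer is re-selected whenever the pager's
** state changes, so the hot fetch path becomes a single indirect call
** instead of re-testing the error/mmap/codec state on every request.
** ToyPager and the toy* names below are invented for illustration only. */
#include <stdio.h>

typedef struct ToyPager ToyPager;
struct ToyPager {
  int errCode;                        /* Nonzero while in an error state */
  int (*xGet)(ToyPager*, int pgno);   /* Currently selected getter */
};

static int toyGetNormal(ToyPager *p, int pgno){
  (void)p;
  printf("fetch page %d\n", pgno);
  return 0;                           /* SQLITE_OK in the real code */
}
static int toyGetError(ToyPager *p, int pgno){
  (void)pgno;
  return p->errCode;                  /* Fail fast without touching the cache */
}
static void toySetGetter(ToyPager *p){
  p->xGet = p->errCode ? toyGetError : toyGetNormal;
}

int main(void){
  ToyPager p = {0, 0};
  toySetGetter(&p);
  p.xGet(&p, 1);                      /* dispatches to toyGetNormal */
  p.errCode = 7;
  toySetGetter(&p);                   /* re-pick after the state change */
  return p.xGet(&p, 2)!=0 ? 1 : 0;    /* dispatches to toyGetError */
}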
/*
** Return true if it is necessary to write page *pPg into the sub-journal.
** A page needs to be written into the sub-journal if there exists one
@@ -48199,6 +48645,7 @@ static void pager_unlock(Pager *pPager){
}
if( USEFETCH(pPager) ) sqlite3OsUnfetch(pPager->fd, 0, 0);
pPager->errCode = SQLITE_OK;
+ setGetterMethod(pPager);
}
pPager->journalOff = 0;
@@ -48236,6 +48683,7 @@ static int pager_error(Pager *pPager, int rc){
if( rc2==SQLITE_FULL || rc2==SQLITE_IOERR ){
pPager->errCode = rc;
pPager->eState = PAGER_ERROR;
+ setGetterMethod(pPager);
}
return rc;
}
@@ -48404,7 +48852,7 @@ static int pager_end_transaction(Pager *pPager, int hasMaster, int bCommit){
pPager->pInJournal = 0;
pPager->nRec = 0;
if( rc==SQLITE_OK ){
- if( pagerFlushOnCommit(pPager, bCommit) ){
+ if( MEMDB || pagerFlushOnCommit(pPager, bCommit) ){
sqlite3PcacheCleanAll(pPager->pPCache);
}else{
sqlite3PcacheClearWritable(pPager->pPCache);
@@ -49803,6 +50251,7 @@ static void pagerFixMaplimit(Pager *pPager){
sqlite3_int64 sz;
sz = pPager->szMmap;
pPager->bUseFetch = (sz>0);
+ setGetterMethod(pPager);
sqlite3OsFileControlHint(pPager->fd, SQLITE_FCNTL_MMAP_SIZE, &sz);
}
#endif
@@ -50299,6 +50748,7 @@ static int pagerSyncHotJournal(Pager *pPager){
return rc;
}
+#if SQLITE_MAX_MMAP_SIZE>0
/*
** Obtain a reference to a memory mapped page object for page number pgno.
** The new object will use the pointer pData, obtained from xFetch().
@@ -50321,7 +50771,8 @@ static int pagerAcquireMapPage(
*ppPage = p = pPager->pMmapFreelist;
pPager->pMmapFreelist = p->pDirty;
p->pDirty = 0;
- memset(p->pExtra, 0, pPager->nExtra);
+ assert( pPager->nExtra>=8 );
+ memset(p->pExtra, 0, 8);
}else{
*ppPage = p = (PgHdr *)sqlite3MallocZero(sizeof(PgHdr) + pPager->nExtra);
if( p==0 ){
@@ -50346,6 +50797,7 @@ static int pagerAcquireMapPage(
return SQLITE_OK;
}
+#endif
/*
** Release a reference to page pPg. pPg must have been returned by an
@@ -50388,9 +50840,10 @@ static void pagerFreeMapHdrs(Pager *pPager){
** a hot journal may be left in the filesystem but no error is returned
** to the caller.
*/
-SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager){
+SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager, sqlite3 *db){
u8 *pTmp = (u8 *)pPager->pTmpSpace;
+ assert( db || pagerUseWal(pPager)==0 );
assert( assert_pager_state(pPager) );
disable_simulated_io_errors();
sqlite3BeginBenignMalloc();
@@ -50398,7 +50851,10 @@ SQLITE_PRIVATE int sqlite3PagerClose(Pager *pPager){
/* pPager->errCode = 0; */
pPager->exclusiveMode = 0;
#ifndef SQLITE_OMIT_WAL
- sqlite3WalClose(pPager->pWal, pPager->ckptSyncFlags, pPager->pageSize, pTmp);
+ assert( db || pPager->pWal==0 );
+ sqlite3WalClose(pPager->pWal, db, pPager->ckptSyncFlags, pPager->pageSize,
+ (db && (db->flags & SQLITE_NoCkptOnClose) ? 0 : pTmp)
+ );
pPager->pWal = 0;
#endif
pager_reset(pPager);
@@ -50917,7 +51373,9 @@ SQLITE_PRIVATE int sqlite3PagerFlush(Pager *pPager){
**
** The nExtra parameter specifies the number of bytes of space allocated
** along with each page reference. This space is available to the user
-** via the sqlite3PagerGetExtra() API.
+** via the sqlite3PagerGetExtra() API. When a new page is allocated, the
+** first 8 bytes of this space are zeroed but the remainder is uninitialized.
+** (The extra space is used by btree as the MemPage object.)
**
** The flags argument is used to specify properties that affect the
** operation of the pager. It should be passed some bitwise combination
@@ -51147,8 +51605,8 @@ act_like_temp_file:
/* Initialize the PCache object. */
if( rc==SQLITE_OK ){
- assert( nExtra<1000 );
nExtra = ROUND8(nExtra);
+ assert( nExtra>=8 && nExtra<1000 );
rc = sqlite3PcacheOpen(szPageDflt, nExtra, !memDb,
!memDb?pagerStress:0, (void *)pPager, pPager->pPCache);
}
@@ -51213,6 +51671,7 @@ act_like_temp_file:
/* pPager->xBusyHandler = 0; */
/* pPager->pBusyHandlerArg = 0; */
pPager->xReiniter = xReinit;
+ setGetterMethod(pPager);
/* memset(pPager->aHash, 0, sizeof(pPager->aHash)); */
/* pPager->szMmap = SQLITE_DEFAULT_MMAP_SIZE // will be set by btree.c */
@@ -51626,10 +52085,17 @@ static void pagerUnlockIfUnused(Pager *pPager){
}
/*
-** Acquire a reference to page number pgno in pager pPager (a page
-** reference has type DbPage*). If the requested reference is
+** The page getter methods each try to acquire a reference to a
+** page with page number pgno. If the requested reference is
** successfully obtained, it is copied to *ppPage and SQLITE_OK returned.
**
+** There are different implementations of the getter method depending
+** on the current state of the pager.
+**
+** getPageNormal() -- The normal getter
+** getPageError() -- Used if the pager is in an error state
+**       getPageMMap()     --  Used if memory-mapped I/O is enabled
+**
** If the requested page is already in the cache, it is returned.
** Otherwise, a new page object is allocated and populated with data
** read from the database file. In some cases, the pcache module may
@@ -51641,14 +52107,14 @@ static void pagerUnlockIfUnused(Pager *pPager){
** already in the cache when this function is called, then the extra
** data is left as it was when the page object was last used.
**
-** If the database image is smaller than the requested page or if a
-** non-zero value is passed as the noContent parameter and the
+** If the database image is smaller than the requested page or if
+** the flags parameter contains the PAGER_GET_NOCONTENT bit and the
** requested page is not already stored in the cache, then no
** actual disk read occurs. In this case the memory image of the
** page is initialized to all zeros.
**
-** If noContent is true, it means that we do not care about the contents
-** of the page. This occurs in two scenarios:
+** If PAGER_GET_NOCONTENT is true, it means that we do not care about
+** the contents of the page. This occurs in two scenarios:
**
** a) When reading a free-list leaf page from the database, and
**
@@ -51656,8 +52122,8 @@ static void pagerUnlockIfUnused(Pager *pPager){
** a new page into the cache to be filled with the data read
** from the savepoint journal.
**
-** If noContent is true, then the data returned is zeroed instead of
-** being read from the database. Additionally, the bits corresponding
+** If PAGER_GET_NOCONTENT is true, then the data returned is zeroed instead
+** of being read from the database. Additionally, the bits corresponding
** to pgno in Pager.pInJournal (bitvec of pages already written to the
** journal file) and the PagerSavepoint.pInSavepoint bitvecs of any open
** savepoints are set. This means if the page is made writable at any
@@ -51675,106 +52141,39 @@ static void pagerUnlockIfUnused(Pager *pPager){
** Since Lookup() never goes to disk, it never has to deal with locks
** or journal files.
*/
-SQLITE_PRIVATE int sqlite3PagerGet(
+static int getPageNormal(
Pager *pPager, /* The pager open on the database file */
Pgno pgno, /* Page number to fetch */
DbPage **ppPage, /* Write a pointer to the page here */
int flags /* PAGER_GET_XXX flags */
){
int rc = SQLITE_OK;
- PgHdr *pPg = 0;
- u32 iFrame = 0; /* Frame to read from WAL file */
- const int noContent = (flags & PAGER_GET_NOCONTENT);
-
- /* It is acceptable to use a read-only (mmap) page for any page except
- ** page 1 if there is no write-transaction open or the ACQUIRE_READONLY
- ** flag was specified by the caller. And so long as the db is not a
- ** temporary or in-memory database. */
- const int bMmapOk = (pgno>1 && USEFETCH(pPager)
- && (pPager->eState==PAGER_READER || (flags & PAGER_GET_READONLY))
-#ifdef SQLITE_HAS_CODEC
- && pPager->xCodec==0
-#endif
- );
+ PgHdr *pPg;
+ u8 noContent; /* True if PAGER_GET_NOCONTENT is set */
+ sqlite3_pcache_page *pBase;
- /* Optimization note: Adding the "pgno<=1" term before "pgno==0" here
- ** allows the compiler optimizer to reuse the results of the "pgno>1"
- ** test in the previous statement, and avoid testing pgno==0 in the
- ** common case where pgno is large. */
- if( pgno<=1 && pgno==0 ){
- return SQLITE_CORRUPT_BKPT;
- }
+ assert( pPager->errCode==SQLITE_OK );
assert( pPager->eState>=PAGER_READER );
assert( assert_pager_state(pPager) );
- assert( noContent==0 || bMmapOk==0 );
-
assert( pPager->hasHeldSharedLock==1 );
- /* If the pager is in the error state, return an error immediately.
- ** Otherwise, request the page from the PCache layer. */
- if( pPager->errCode!=SQLITE_OK ){
- rc = pPager->errCode;
- }else{
- if( bMmapOk && pagerUseWal(pPager) ){
- rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame);
- if( rc!=SQLITE_OK ) goto pager_acquire_err;
- }
-
- if( bMmapOk && iFrame==0 ){
- void *pData = 0;
-
- rc = sqlite3OsFetch(pPager->fd,
- (i64)(pgno-1) * pPager->pageSize, pPager->pageSize, &pData
- );
-
- if( rc==SQLITE_OK && pData ){
- if( pPager->eState>PAGER_READER || pPager->tempFile ){
- pPg = sqlite3PagerLookup(pPager, pgno);
- }
- if( pPg==0 ){
- rc = pagerAcquireMapPage(pPager, pgno, pData, &pPg);
- }else{
- sqlite3OsUnfetch(pPager->fd, (i64)(pgno-1)*pPager->pageSize, pData);
- }
- if( pPg ){
- assert( rc==SQLITE_OK );
- *ppPage = pPg;
- return SQLITE_OK;
- }
- }
- if( rc!=SQLITE_OK ){
- goto pager_acquire_err;
- }
- }
-
- {
- sqlite3_pcache_page *pBase;
- pBase = sqlite3PcacheFetch(pPager->pPCache, pgno, 3);
- if( pBase==0 ){
- rc = sqlite3PcacheFetchStress(pPager->pPCache, pgno, &pBase);
- if( rc!=SQLITE_OK ) goto pager_acquire_err;
- if( pBase==0 ){
- pPg = *ppPage = 0;
- rc = SQLITE_NOMEM_BKPT;
- goto pager_acquire_err;
- }
- }
- pPg = *ppPage = sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pBase);
- assert( pPg!=0 );
- }
- }
-
- if( rc!=SQLITE_OK ){
- /* Either the call to sqlite3PcacheFetch() returned an error or the
- ** pager was already in the error-state when this function was called.
- ** Set pPg to 0 and jump to the exception handler. */
+ if( pgno==0 ) return SQLITE_CORRUPT_BKPT;
+ pBase = sqlite3PcacheFetch(pPager->pPCache, pgno, 3);
+ if( pBase==0 ){
pPg = 0;
- goto pager_acquire_err;
+ rc = sqlite3PcacheFetchStress(pPager->pPCache, pgno, &pBase);
+ if( rc!=SQLITE_OK ) goto pager_acquire_err;
+ if( pBase==0 ){
+ rc = SQLITE_NOMEM_BKPT;
+ goto pager_acquire_err;
+ }
}
+ pPg = *ppPage = sqlite3PcacheFetchFinish(pPager->pPCache, pgno, pBase);
assert( pPg==(*ppPage) );
assert( pPg->pgno==pgno );
assert( pPg->pPager==pPager || pPg->pPager==0 );
+ noContent = (flags & PAGER_GET_NOCONTENT)!=0;
if( pPg->pPager && !noContent ){
/* In this case the pcache already contains an initialized copy of
** the page. Return without further ado. */
@@ -51784,17 +52183,18 @@ SQLITE_PRIVATE int sqlite3PagerGet(
}else{
/* The pager cache has created a new page. Its content needs to
- ** be initialized. */
-
- pPg->pPager = pPager;
-
- /* The maximum page number is 2^31. Return SQLITE_CORRUPT if a page
- ** number greater than this, or the unused locking-page, is requested. */
+ ** be initialized. But first some error checks:
+ **
+ ** (1) The maximum page number is 2^31
+ ** (2) Never try to fetch the locking page
+ */
if( pgno>PAGER_MAX_PGNO || pgno==PAGER_MJ_PGNO(pPager) ){
rc = SQLITE_CORRUPT_BKPT;
goto pager_acquire_err;
}
+ pPg->pPager = pPager;
+
assert( !isOpen(pPager->fd) || !MEMDB );
if( !isOpen(pPager->fd) || pPager->dbSize<pgno || noContent ){
if( pgno>pPager->mxPgno ){
@@ -51820,7 +52220,8 @@ SQLITE_PRIVATE int sqlite3PagerGet(
memset(pPg->pData, 0, pPager->pageSize);
IOTRACE(("ZERO %p %d\n", pPager, pgno));
}else{
- if( pagerUseWal(pPager) && bMmapOk==0 ){
+ u32 iFrame = 0; /* Frame to read from WAL file */
+ if( pagerUseWal(pPager) ){
rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame);
if( rc!=SQLITE_OK ) goto pager_acquire_err;
}
@@ -51833,7 +52234,6 @@ SQLITE_PRIVATE int sqlite3PagerGet(
}
pager_set_pagehash(pPg);
}
-
return SQLITE_OK;
pager_acquire_err:
@@ -51842,11 +52242,109 @@ pager_acquire_err:
sqlite3PcacheDrop(pPg);
}
pagerUnlockIfUnused(pPager);
-
*ppPage = 0;
return rc;
}
+#if SQLITE_MAX_MMAP_SIZE>0
+/* The page getter for when memory-mapped I/O is enabled */
+static int getPageMMap(
+ Pager *pPager, /* The pager open on the database file */
+ Pgno pgno, /* Page number to fetch */
+ DbPage **ppPage, /* Write a pointer to the page here */
+ int flags /* PAGER_GET_XXX flags */
+){
+ int rc = SQLITE_OK;
+ PgHdr *pPg = 0;
+ u32 iFrame = 0; /* Frame to read from WAL file */
+
+ /* It is acceptable to use a read-only (mmap) page for any page except
+ ** page 1 if there is no write-transaction open or the ACQUIRE_READONLY
+ ** flag was specified by the caller. And so long as the db is not a
+ ** temporary or in-memory database. */
+ const int bMmapOk = (pgno>1
+ && (pPager->eState==PAGER_READER || (flags & PAGER_GET_READONLY))
+ );
+
+ assert( USEFETCH(pPager) );
+#ifdef SQLITE_HAS_CODEC
+ assert( pPager->xCodec==0 );
+#endif
+
+ /* Optimization note: Adding the "pgno<=1" term before "pgno==0" here
+ ** allows the compiler optimizer to reuse the results of the "pgno>1"
+ ** test in the previous statement, and avoid testing pgno==0 in the
+ ** common case where pgno is large. */
+ if( pgno<=1 && pgno==0 ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ assert( pPager->eState>=PAGER_READER );
+ assert( assert_pager_state(pPager) );
+ assert( pPager->hasHeldSharedLock==1 );
+ assert( pPager->errCode==SQLITE_OK );
+
+ if( bMmapOk && pagerUseWal(pPager) ){
+ rc = sqlite3WalFindFrame(pPager->pWal, pgno, &iFrame);
+ if( rc!=SQLITE_OK ){
+ *ppPage = 0;
+ return rc;
+ }
+ }
+ if( bMmapOk && iFrame==0 ){
+ void *pData = 0;
+ rc = sqlite3OsFetch(pPager->fd,
+ (i64)(pgno-1) * pPager->pageSize, pPager->pageSize, &pData
+ );
+ if( rc==SQLITE_OK && pData ){
+ if( pPager->eState>PAGER_READER || pPager->tempFile ){
+ pPg = sqlite3PagerLookup(pPager, pgno);
+ }
+ if( pPg==0 ){
+ rc = pagerAcquireMapPage(pPager, pgno, pData, &pPg);
+ }else{
+ sqlite3OsUnfetch(pPager->fd, (i64)(pgno-1)*pPager->pageSize, pData);
+ }
+ if( pPg ){
+ assert( rc==SQLITE_OK );
+ *ppPage = pPg;
+ return SQLITE_OK;
+ }
+ }
+ if( rc!=SQLITE_OK ){
+ *ppPage = 0;
+ return rc;
+ }
+ }
+ return getPageNormal(pPager, pgno, ppPage, flags);
+}
+#endif /* SQLITE_MAX_MMAP_SIZE>0 */
+
+/* The page getter method for when the pager is in an error state */
+static int getPageError(
+ Pager *pPager, /* The pager open on the database file */
+ Pgno pgno, /* Page number to fetch */
+ DbPage **ppPage, /* Write a pointer to the page here */
+ int flags /* PAGER_GET_XXX flags */
+){
+ UNUSED_PARAMETER(pgno);
+ UNUSED_PARAMETER(flags);
+ assert( pPager->errCode!=SQLITE_OK );
+ *ppPage = 0;
+ return pPager->errCode;
+}
+
+
+/* Dispatch all page fetch requests to the appropriate getter method.
+*/
+SQLITE_PRIVATE int sqlite3PagerGet(
+ Pager *pPager, /* The pager open on the database file */
+ Pgno pgno, /* Page number to fetch */
+ DbPage **ppPage, /* Write a pointer to the page here */
+ int flags /* PAGER_GET_XXX flags */
+){
+ return pPager->xGet(pPager, pgno, ppPage, flags);
+}
+
/*
** Acquire a page if it is already in the in-memory cache. Do
** not read the page from disk. Return a pointer to the page,
@@ -52320,11 +52818,11 @@ SQLITE_PRIVATE int sqlite3PagerWrite(PgHdr *pPg){
assert( (pPg->flags & PGHDR_MMAP)==0 );
assert( pPager->eState>=PAGER_WRITER_LOCKED );
assert( assert_pager_state(pPager) );
- if( pPager->errCode ){
- return pPager->errCode;
- }else if( (pPg->flags & PGHDR_WRITEABLE)!=0 && pPager->dbSize>=pPg->pgno ){
+ if( (pPg->flags & PGHDR_WRITEABLE)!=0 && pPager->dbSize>=pPg->pgno ){
if( pPager->nSavepoint ) return subjournalPageIfRequired(pPg);
return SQLITE_OK;
+ }else if( pPager->errCode ){
+ return pPager->errCode;
}else if( pPager->sectorSize > (u32)pPager->pageSize ){
assert( pPager->tempFile==0 );
return pagerWriteLargeSector(pPg);
@@ -52819,6 +53317,7 @@ SQLITE_PRIVATE int sqlite3PagerRollback(Pager *pPager){
*/
pPager->errCode = SQLITE_ABORT;
pPager->eState = PAGER_ERROR;
+ setGetterMethod(pPager);
return rc;
}
}else{
@@ -53080,6 +53579,7 @@ SQLITE_PRIVATE int sqlite3PagerSavepoint(Pager *pPager, int op, int iSavepoint){
){
pPager->errCode = SQLITE_ABORT;
pPager->eState = PAGER_ERROR;
+ setGetterMethod(pPager);
}
#endif
}
@@ -53152,6 +53652,7 @@ SQLITE_PRIVATE void sqlite3PagerSetCodec(
pPager->xCodecSizeChng = xCodecSizeChng;
pPager->xCodecFree = xCodecFree;
pPager->pCodec = pCodec;
+ setGetterMethod(pPager);
pagerReportSize(pPager);
}
SQLITE_PRIVATE void *sqlite3PagerGetCodec(Pager *pPager){
@@ -53561,10 +54062,16 @@ SQLITE_PRIVATE void sqlite3PagerClearCache(Pager *pPager){
**
** Parameter eMode is one of SQLITE_CHECKPOINT_PASSIVE, FULL or RESTART.
*/
-SQLITE_PRIVATE int sqlite3PagerCheckpoint(Pager *pPager, int eMode, int *pnLog, int *pnCkpt){
+SQLITE_PRIVATE int sqlite3PagerCheckpoint(
+ Pager *pPager, /* Checkpoint on this pager */
+ sqlite3 *db, /* Db handle used to check for interrupts */
+ int eMode, /* Type of checkpoint */
+ int *pnLog, /* OUT: Final number of frames in log */
+ int *pnCkpt /* OUT: Final number of checkpointed frames */
+){
int rc = SQLITE_OK;
if( pPager->pWal ){
- rc = sqlite3WalCheckpoint(pPager->pWal, eMode,
+ rc = sqlite3WalCheckpoint(pPager->pWal, db, eMode,
(eMode==SQLITE_CHECKPOINT_PASSIVE ? 0 : pPager->xBusyHandler),
pPager->pBusyHandlerArg,
pPager->ckptSyncFlags, pPager->pageSize, (u8 *)pPager->pTmpSpace,
@@ -53696,7 +54203,7 @@ SQLITE_PRIVATE int sqlite3PagerOpenWal(
** error (SQLITE_BUSY) is returned and the log connection is not closed.
** If successful, the EXCLUSIVE lock is not released before returning.
*/
-SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager){
+SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager, sqlite3 *db){
int rc = SQLITE_OK;
assert( pPager->journalMode==PAGER_JOURNALMODE_WAL );
@@ -53724,7 +54231,7 @@ SQLITE_PRIVATE int sqlite3PagerCloseWal(Pager *pPager){
if( rc==SQLITE_OK && pPager->pWal ){
rc = pagerExclusiveLock(pPager);
if( rc==SQLITE_OK ){
- rc = sqlite3WalClose(pPager->pWal, pPager->ckptSyncFlags,
+ rc = sqlite3WalClose(pPager->pWal, db, pPager->ckptSyncFlags,
pPager->pageSize, (u8*)pPager->pTmpSpace);
pPager->pWal = 0;
pagerFixMaplimit(pPager);
@@ -53761,6 +54268,20 @@ SQLITE_PRIVATE int sqlite3PagerSnapshotOpen(Pager *pPager, sqlite3_snapshot *pSn
}
return rc;
}
+
+/*
+** If this is a WAL database, call sqlite3WalSnapshotRecover(). If this
+** is not a WAL database, return an error.
+*/
+SQLITE_PRIVATE int sqlite3PagerSnapshotRecover(Pager *pPager){
+ int rc;
+ if( pPager->pWal ){
+ rc = sqlite3WalSnapshotRecover(pPager->pWal);
+ }else{
+ rc = SQLITE_ERROR;
+ }
+ return rc;
+}
#endif /* SQLITE_ENABLE_SNAPSHOT */
#endif /* !SQLITE_OMIT_WAL */
@@ -55507,6 +56028,7 @@ static void walRestartHdr(Wal *pWal, u32 salt1){
*/
static int walCheckpoint(
Wal *pWal, /* Wal connection */
+ sqlite3 *db, /* Check for interrupts on this handle */
int eMode, /* One of PASSIVE, FULL or RESTART */
int (*xBusy)(void*), /* Function to call when busy */
void *pBusyArg, /* Context argument for xBusyHandler */
@@ -55601,6 +56123,10 @@ static int walCheckpoint(
while( rc==SQLITE_OK && 0==walIteratorNext(pIter, &iDbpage, &iFrame) ){
i64 iOffset;
assert( walFramePgno(pWal, iFrame)==iDbpage );
+ if( db->u1.isInterrupted ){
+ rc = db->mallocFailed ? SQLITE_NOMEM_BKPT : SQLITE_INTERRUPT;
+ break;
+ }
if( iFrame<=nBackfill || iFrame>mxSafeFrame || iDbpage>mxPage ){
continue;
}
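/* Illustrative sketch (not from SQLite): the interrupt-polling pattern the
** hunk above adds to walCheckpoint(), reduced to a standalone loop. A long
** running job checks a flag on every iteration and abandons the work with a
** distinct error code. ToyDb, TOY_OK and TOY_INTERRUPT are invented names. */
#include <stddef.h>
#include <stdio.h>

#define TOY_OK        0
#define TOY_INTERRUPT 9

typedef struct { volatile int isInterrupted; } ToyDb;

static int toyCheckpoint(ToyDb *db, const int *aFrame, size_t nFrame){
  size_t i;
  for(i=0; i<nFrame; i++){
    if( db->isInterrupted ) return TOY_INTERRUPT;   /* stop mid-checkpoint */
    (void)aFrame[i];                                /* copy the frame here */
  }
  return TOY_OK;
}

int main(void){
  ToyDb db = {0};
  int aFrame[3] = {1, 2, 3};
  printf("%d\n", toyCheckpoint(&db, aFrame, 3));    /* prints 0 (TOY_OK) */
  return 0;
}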
@@ -55705,6 +56231,7 @@ static void walLimitSize(Wal *pWal, i64 nMax){
*/
SQLITE_PRIVATE int sqlite3WalClose(
Wal *pWal, /* Wal to close */
+ sqlite3 *db, /* For interrupt flag */
int sync_flags, /* Flags to pass to OsSync() (or 0) */
int nBuf,
u8 *zBuf /* Buffer of at least nBuf bytes */
@@ -55721,13 +56248,14 @@ SQLITE_PRIVATE int sqlite3WalClose(
**
** The EXCLUSIVE lock is not released before returning.
*/
- rc = sqlite3OsLock(pWal->pDbFd, SQLITE_LOCK_EXCLUSIVE);
- if( rc==SQLITE_OK ){
+ if( zBuf!=0
+ && SQLITE_OK==(rc = sqlite3OsLock(pWal->pDbFd, SQLITE_LOCK_EXCLUSIVE))
+ ){
if( pWal->exclusiveMode==WAL_NORMAL_MODE ){
pWal->exclusiveMode = WAL_EXCLUSIVE_MODE;
}
- rc = sqlite3WalCheckpoint(
- pWal, SQLITE_CHECKPOINT_PASSIVE, 0, 0, sync_flags, nBuf, zBuf, 0, 0
+ rc = sqlite3WalCheckpoint(pWal, db,
+ SQLITE_CHECKPOINT_PASSIVE, 0, 0, sync_flags, nBuf, zBuf, 0, 0
);
if( rc==SQLITE_OK ){
int bPersist = -1;
@@ -56156,6 +56684,84 @@ static int walTryBeginRead(Wal *pWal, int *pChanged, int useWal, int cnt){
return rc;
}
+#ifdef SQLITE_ENABLE_SNAPSHOT
+/*
+** Attempt to reduce the value of the WalCkptInfo.nBackfillAttempted
+** variable so that older snapshots can be accessed. To do this, loop
+** through all wal frames from nBackfillAttempted to (nBackfill+1),
+** comparing their content to the corresponding page in the database
+** file, if any. Set nBackfillAttempted to the frame number of the
+** first frame for which the wal file content matches the db file.
+**
+** This is only really safe if the file-system is such that any page
+** writes made by earlier checkpointers were atomic operations, which
+** is not always true. It is also possible that nBackfillAttempted
+** may be left set to a value larger than expected, if a wal frame
+** contains content that is a duplicate of an earlier version of the same
+** page.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code if an
+** error occurs. It is not an error if nBackfillAttempted cannot be
+** decreased at all.
+*/
+SQLITE_PRIVATE int sqlite3WalSnapshotRecover(Wal *pWal){
+ int rc;
+
+ assert( pWal->readLock>=0 );
+ rc = walLockExclusive(pWal, WAL_CKPT_LOCK, 1);
+ if( rc==SQLITE_OK ){
+ volatile WalCkptInfo *pInfo = walCkptInfo(pWal);
+ int szPage = (int)pWal->szPage;
+ i64 szDb; /* Size of db file in bytes */
+
+ rc = sqlite3OsFileSize(pWal->pDbFd, &szDb);
+ if( rc==SQLITE_OK ){
+ void *pBuf1 = sqlite3_malloc(szPage);
+ void *pBuf2 = sqlite3_malloc(szPage);
+ if( pBuf1==0 || pBuf2==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ u32 i = pInfo->nBackfillAttempted;
+ for(i=pInfo->nBackfillAttempted; i>pInfo->nBackfill; i--){
+ volatile ht_slot *dummy;
+ volatile u32 *aPgno; /* Array of page numbers */
+ u32 iZero; /* Frame corresponding to aPgno[0] */
+ u32 pgno; /* Page number in db file */
+ i64 iDbOff; /* Offset of db file entry */
+ i64 iWalOff; /* Offset of wal file entry */
+
+ rc = walHashGet(pWal, walFramePage(i), &dummy, &aPgno, &iZero);
+ if( rc!=SQLITE_OK ) break;
+ pgno = aPgno[i-iZero];
+ iDbOff = (i64)(pgno-1) * szPage;
+
+ if( iDbOff+szPage<=szDb ){
+ iWalOff = walFrameOffset(i, szPage) + WAL_FRAME_HDRSIZE;
+ rc = sqlite3OsRead(pWal->pWalFd, pBuf1, szPage, iWalOff);
+
+ if( rc==SQLITE_OK ){
+ rc = sqlite3OsRead(pWal->pDbFd, pBuf2, szPage, iDbOff);
+ }
+
+ if( rc!=SQLITE_OK || 0==memcmp(pBuf1, pBuf2, szPage) ){
+ break;
+ }
+ }
+
+ pInfo->nBackfillAttempted = i-1;
+ }
+ }
+
+ sqlite3_free(pBuf1);
+ sqlite3_free(pBuf2);
+ }
+ walUnlockExclusive(pWal, WAL_CKPT_LOCK, 1);
+ }
+
+ return rc;
+}
+#endif /* SQLITE_ENABLE_SNAPSHOT */
+
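/* Self-contained toy (invented data layout, not SQLite's) of the backward
** scan performed by sqlite3WalSnapshotRecover() above: starting from the
** highest frame a checkpointer claimed to have backfilled, the mark is
** lowered past every trailing frame whose content no longer matches the
** database image, and the scan stops at the first frame that does match. */
#include <string.h>
#include <stdio.h>

#define PGSZ 4

static unsigned recoverMark(const char wal[][PGSZ],  /* frame content, 1-based */
                            const char db[][PGSZ],   /* db page per frame, 1-based */
                            unsigned attempted, unsigned done){
  unsigned i;
  for(i=attempted; i>done; i--){
    if( memcmp(wal[i], db[i], PGSZ)==0 ) break;      /* already backfilled */
    attempted = i-1;                                 /* frame not in the db yet */
  }
  return attempted;
}

int main(void){
  const char wal[4][PGSZ] = {"", "aa", "bb", "cc"};
  const char db [4][PGSZ] = {"", "aa", "xx", "yy"};  /* frames 2,3 never landed */
  printf("%u\n", recoverMark(wal, db, 3, 0));        /* prints 1 */
  return 0;
}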
/*
** Begin a read transaction on the database.
**
@@ -56218,7 +56824,11 @@ SQLITE_PRIVATE int sqlite3WalBeginReadTransaction(Wal *pWal, int *pChanged){
** has not yet set the pInfo->nBackfillAttempted variable to indicate
** its intent. To avoid the race condition this leads to, ensure that
** there is no checkpointer process by taking a shared CKPT lock
- ** before checking pInfo->nBackfillAttempted. */
+ ** before checking pInfo->nBackfillAttempted.
+ **
+ ** TODO: Does the aReadMark[] lock prevent a checkpointer from doing
+ ** this already?
+ */
rc = walLockShared(pWal, WAL_CKPT_LOCK);
if( rc==SQLITE_OK ){
@@ -56975,6 +57585,7 @@ SQLITE_PRIVATE int sqlite3WalFrames(
*/
SQLITE_PRIVATE int sqlite3WalCheckpoint(
Wal *pWal, /* Wal connection */
+ sqlite3 *db, /* Check this handle's interrupt flag */
int eMode, /* PASSIVE, FULL, RESTART, or TRUNCATE */
int (*xBusy)(void*), /* Function to call when busy */
void *pBusyArg, /* Context argument for xBusyHandler */
@@ -57049,7 +57660,7 @@ SQLITE_PRIVATE int sqlite3WalCheckpoint(
if( pWal->hdr.mxFrame && walPagesize(pWal)!=nBuf ){
rc = SQLITE_CORRUPT_BKPT;
}else{
- rc = walCheckpoint(pWal, eMode2, xBusy2, pBusyArg, sync_flags, zBuf);
+ rc = walCheckpoint(pWal, db, eMode2, xBusy2, pBusyArg, sync_flags, zBuf);
}
/* If no error occurred, set the output variables. */
@@ -57169,9 +57780,14 @@ SQLITE_PRIVATE int sqlite3WalHeapMemory(Wal *pWal){
SQLITE_PRIVATE int sqlite3WalSnapshotGet(Wal *pWal, sqlite3_snapshot **ppSnapshot){
int rc = SQLITE_OK;
WalIndexHdr *pRet;
+ static const u32 aZero[4] = { 0, 0, 0, 0 };
assert( pWal->readLock>=0 && pWal->writeLock==0 );
+ if( memcmp(&pWal->hdr.aFrameCksum[0],aZero,16)==0 ){
+ *ppSnapshot = 0;
+ return SQLITE_ERROR;
+ }
pRet = (WalIndexHdr*)sqlite3_malloc(sizeof(WalIndexHdr));
if( pRet==0 ){
rc = SQLITE_NOMEM_BKPT;
@@ -57509,37 +58125,39 @@ typedef struct CellInfo CellInfo;
#define PTF_LEAF 0x08
/*
-** As each page of the file is loaded into memory, an instance of the following
-** structure is appended and initialized to zero. This structure stores
-** information about the page that is decoded from the raw file page.
+** An instance of this object stores information about a single database
+** page that has been loaded into memory. The information in this object
+** is derived from the raw on-disk page content.
**
-** The pParent field points back to the parent page. This allows us to
-** walk up the BTree from any leaf to the root. Care must be taken to
-** unref() the parent page pointer when this page is no longer referenced.
-** The pageDestructor() routine handles that chore.
+** As each database page is loaded into memory, the pager allocates an
+** instance of this object and zeros the first 8 bytes. (This is the
+** "extra" information associated with each page of the pager.)
**
** Access to all fields of this structure is controlled by the mutex
** stored in MemPage.pBt->mutex.
*/
struct MemPage {
u8 isInit; /* True if previously initialized. MUST BE FIRST! */
- u8 nOverflow; /* Number of overflow cell bodies in aCell[] */
+ u8 bBusy; /* Prevent endless loops on corrupt database files */
u8 intKey; /* True if table b-trees. False for index b-trees */
u8 intKeyLeaf; /* True if the leaf of an intKey table */
+ Pgno pgno; /* Page number for this page */
+ /* Only the first 8 bytes (above) are zeroed by pager.c when a new page
+ ** is allocated. All fields that follow must be initialized before use */
u8 leaf; /* True if a leaf page */
u8 hdrOffset; /* 100 for page 1. 0 otherwise */
u8 childPtrSize; /* 0 if leaf==1. 4 if leaf==0 */
u8 max1bytePayload; /* min(maxLocal,127) */
- u8 bBusy; /* Prevent endless loops on corrupt database files */
+ u8 nOverflow; /* Number of overflow cell bodies in aCell[] */
u16 maxLocal; /* Copy of BtShared.maxLocal or BtShared.maxLeaf */
u16 minLocal; /* Copy of BtShared.minLocal or BtShared.minLeaf */
u16 cellOffset; /* Index in aData of first cell pointer */
u16 nFree; /* Number of free bytes on the page */
u16 nCell; /* Number of cells on this page, local and ovfl */
u16 maskPage; /* Mask for page offset */
- u16 aiOvfl[5]; /* Insert the i-th overflow cell before the aiOvfl-th
+ u16 aiOvfl[4]; /* Insert the i-th overflow cell before the aiOvfl-th
** non-overflow cell */
- u8 *apOvfl[5]; /* Pointers to the body of overflow cells */
+ u8 *apOvfl[4]; /* Pointers to the body of overflow cells */
BtShared *pBt; /* Pointer to BtShared that this page is part of */
u8 *aData; /* Pointer to disk image of the page data */
u8 *aDataEnd; /* One byte past the end of usable data */
@@ -57548,17 +58166,9 @@ struct MemPage {
DbPage *pDbPage; /* Pager page handle */
u16 (*xCellSize)(MemPage*,u8*); /* cellSizePtr method */
void (*xParseCell)(MemPage*,u8*,CellInfo*); /* btreeParseCell method */
- Pgno pgno; /* Page number for this page */
};
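/* A hedged illustration (mirror struct invented here) of why the MemPage
** members were reordered in the hunk above: the four u8 flags plus the
** 32-bit page number now sit in the first 8 bytes, which is exactly the
** region that pcache/pager zero when a fresh page is handed out; every
** field after that prefix must be initialized by the btree layer before use. */
#include <stdint.h>

typedef uint8_t  u8;
typedef uint32_t Pgno;

struct MemPagePrefix {
  u8   isInit;       /* zero means "not yet initialized" */
  u8   bBusy;
  u8   intKey;
  u8   intKeyLeaf;
  Pgno pgno;
};

/* With natural alignment the prefix is exactly the 8 zeroed bytes. */
typedef char assertPrefixIs8Bytes[ sizeof(struct MemPagePrefix)==8 ? 1 : -1 ];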
/*
-** The in-memory image of a disk page has the auxiliary information appended
-** to the end. EXTRA_SIZE is the number of bytes of space needed to hold
-** that extra information.
-*/
-#define EXTRA_SIZE sizeof(MemPage)
-
-/*
** A linked list of the following structures is stored at BtShared.pLock.
** Locks are added (or upgraded from READ_LOCK to WRITE_LOCK) when a cursor
** is opened on the table with root page BtShared.iTable. Locks are removed
@@ -57948,11 +58558,9 @@ struct IntegrityCk {
*/
#if SQLITE_BYTEORDER==4321
# define get2byteAligned(x) (*(u16*)(x))
-#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \
- && GCC_VERSION>=4008000
+#elif SQLITE_BYTEORDER==1234 && GCC_VERSION>=4008000
# define get2byteAligned(x) __builtin_bswap16(*(u16*)(x))
-#elif SQLITE_BYTEORDER==1234 && !defined(SQLITE_DISABLE_INTRINSIC) \
- && defined(_MSC_VER) && _MSC_VER>=1300
+#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
# define get2byteAligned(x) _byteswap_ushort(*(u16*)(x))
#else
# define get2byteAligned(x) ((x)[0]<<8 | (x)[1])
@@ -58127,16 +58735,24 @@ SQLITE_PRIVATE int sqlite3BtreeHoldsMutex(Btree *p){
** two or more btrees in common both try to lock all their btrees
** at the same instant.
*/
-SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3 *db){
+static void SQLITE_NOINLINE btreeEnterAll(sqlite3 *db){
int i;
+ int skipOk = 1;
Btree *p;
assert( sqlite3_mutex_held(db->mutex) );
for(i=0; i<db->nDb; i++){
p = db->aDb[i].pBt;
- if( p ) sqlite3BtreeEnter(p);
+ if( p && p->sharable ){
+ sqlite3BtreeEnter(p);
+ skipOk = 0;
+ }
}
+ db->skipBtreeMutex = skipOk;
}
-SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3 *db){
+SQLITE_PRIVATE void sqlite3BtreeEnterAll(sqlite3 *db){
+ if( db->skipBtreeMutex==0 ) btreeEnterAll(db);
+}
+static void SQLITE_NOINLINE btreeLeaveAll(sqlite3 *db){
int i;
Btree *p;
assert( sqlite3_mutex_held(db->mutex) );
@@ -58145,6 +58761,9 @@ SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3 *db){
if( p ) sqlite3BtreeLeave(p);
}
}
+SQLITE_PRIVATE void sqlite3BtreeLeaveAll(sqlite3 *db){
+ if( db->skipBtreeMutex==0 ) btreeLeaveAll(db);
+}
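/* Minimal sketch (toy types, not SQLite's) of the fast path added above:
** the slow loop records whether any attached b-tree is sharable, and when
** none is, subsequent enter/leave calls reduce to a single flag test. */
typedef struct { int sharable; int locked; } ToyBtree;
typedef struct { ToyBtree *aBt; int nBt; int skipMutex; } ToyConn;

static void toyEnterAllSlow(ToyConn *db){
  int i, skipOk = 1;
  for(i=0; i<db->nBt; i++){
    if( db->aBt[i].sharable ){ db->aBt[i].locked = 1; skipOk = 0; }
  }
  db->skipMutex = skipOk;       /* nothing sharable: skip the loop next time */
}
static void toyEnterAll(ToyConn *db){
  if( db->skipMutex==0 ) toyEnterAllSlow(db);
}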
#ifndef NDEBUG
/*
@@ -58876,7 +59495,7 @@ static int saveCursorKey(BtCursor *pCur){
pCur->nKey = sqlite3BtreePayloadSize(pCur);
pKey = sqlite3Malloc( pCur->nKey );
if( pKey ){
- rc = sqlite3BtreeKey(pCur, 0, (int)pCur->nKey, pKey);
+ rc = sqlite3BtreePayload(pCur, 0, (int)pCur->nKey, pKey);
if( rc==SQLITE_OK ){
pCur->pKey = pKey;
}else{
@@ -59007,26 +59626,23 @@ static int btreeMoveto(
){
int rc; /* Status code */
UnpackedRecord *pIdxKey; /* Unpacked index key */
- char aSpace[384]; /* Temp space for pIdxKey - to avoid a malloc */
- char *pFree = 0;
if( pKey ){
assert( nKey==(i64)(int)nKey );
- pIdxKey = sqlite3VdbeAllocUnpackedRecord(
- pCur->pKeyInfo, aSpace, sizeof(aSpace), &pFree
- );
+ pIdxKey = sqlite3VdbeAllocUnpackedRecord(pCur->pKeyInfo);
if( pIdxKey==0 ) return SQLITE_NOMEM_BKPT;
sqlite3VdbeRecordUnpack(pCur->pKeyInfo, (int)nKey, pKey, pIdxKey);
if( pIdxKey->nField==0 ){
- sqlite3DbFree(pCur->pKeyInfo->db, pFree);
- return SQLITE_CORRUPT_BKPT;
+ rc = SQLITE_CORRUPT_BKPT;
+ goto moveto_done;
}
}else{
pIdxKey = 0;
}
rc = sqlite3BtreeMovetoUnpacked(pCur, pIdxKey, nKey, bias, pRes);
- if( pFree ){
- sqlite3DbFree(pCur->pKeyInfo->db, pFree);
+moveto_done:
+ if( pIdxKey ){
+ sqlite3DbFree(pCur->pKeyInfo->db, pIdxKey);
}
return rc;
}
@@ -59987,7 +60603,7 @@ static int btreeInitPage(MemPage *pPage){
assert( pPage->aData == sqlite3PagerGetData(pPage->pDbPage) );
if( !pPage->isInit ){
- u16 pc; /* Address of a freeblock within pPage->aData[] */
+ int pc; /* Address of a freeblock within pPage->aData[] */
u8 hdr; /* Offset to beginning of page header */
u8 *data; /* Equal to pPage->aData */
BtShared *pBt; /* The main btree structure */
@@ -60067,25 +60683,30 @@ static int btreeInitPage(MemPage *pPage){
** freeblocks. */
pc = get2byte(&data[hdr+1]);
nFree = data[hdr+7] + top; /* Init nFree to non-freeblock free space */
- while( pc>0 ){
- u16 next, size;
- if( pc<iCellFirst || pc>iCellLast ){
+ if( pc>0 ){
+ u32 next, size;
+ if( pc<iCellFirst ){
/* EVIDENCE-OF: R-55530-52930 In a well-formed b-tree page, there will
** always be at least one cell before the first freeblock.
- **
- ** Or, the freeblock is off the end of the page
*/
return SQLITE_CORRUPT_BKPT;
}
- next = get2byte(&data[pc]);
- size = get2byte(&data[pc+2]);
- if( (next>0 && next<=pc+size+3) || pc+size>usableSize ){
- /* Free blocks must be in ascending order. And the last byte of
- ** the free-block must lie on the database page. */
- return SQLITE_CORRUPT_BKPT;
+ while( 1 ){
+ if( pc>iCellLast ){
+ return SQLITE_CORRUPT_BKPT; /* Freeblock off the end of the page */
+ }
+ next = get2byte(&data[pc]);
+ size = get2byte(&data[pc+2]);
+ nFree = nFree + size;
+ if( next<=pc+size+3 ) break;
+ pc = next;
+ }
+ if( next>0 ){
+ return SQLITE_CORRUPT_BKPT; /* Freeblock not in ascending order */
+ }
+ if( pc+size>(unsigned int)usableSize ){
+ return SQLITE_CORRUPT_BKPT; /* Last freeblock extends past page end */
}
- nFree = nFree + size;
- pc = next;
}
/* At this point, nFree contains the sum of the offset to the start
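/* Toy version (flat arrays, invented limits) of the reshaped freeblock walk
** above: the chain is followed while accumulating free bytes, and the three
** corruption cases are reported the same way the new code does -- a block
** past the end of the cell content area, links that do not ascend, and a
** final block extending past the usable area. */
#include <stdio.h>

#define CORRUPT (-1)

static int sumFreeblocks(const unsigned char *page, int first,
                         int iCellLast, int usable){
  int pc = first, nFree = 0;
  int next, size;
  if( pc==0 ) return 0;                        /* empty freelist */
  for(;;){
    if( pc>iCellLast ) return CORRUPT;         /* block off the end of the page */
    next = (page[pc]<<8) | page[pc+1];
    size = (page[pc+2]<<8) | page[pc+3];
    nFree += size;
    if( next<=pc+size+3 ) break;               /* end of chain (or overlap) */
    pc = next;
  }
  if( next>0 ) return CORRUPT;                 /* links must be ascending */
  if( pc+size>usable ) return CORRUPT;         /* last block past usable area */
  return nFree;
}

int main(void){
  /* one freeblock at offset 10: next=0, size=20 */
  unsigned char pg[64] = {0};
  pg[10] = 0; pg[11] = 0; pg[12] = 0; pg[13] = 20;
  printf("%d\n", sumFreeblocks(pg, 10, 60, 64));   /* prints 20 */
  return 0;
}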
@@ -60526,7 +61147,7 @@ SQLITE_PRIVATE int sqlite3BtreeOpen(
goto btree_open_out;
}
rc = sqlite3PagerOpen(pVfs, &pBt->pPager, zFilename,
- EXTRA_SIZE, flags, vfsFlags, pageReinit);
+ sizeof(MemPage), flags, vfsFlags, pageReinit);
if( rc==SQLITE_OK ){
sqlite3PagerSetMmapLimit(pBt->pPager, db->szMmap);
rc = sqlite3PagerReadFileheader(pBt->pPager,sizeof(zDbHeader),zDbHeader);
@@ -60639,12 +61260,14 @@ SQLITE_PRIVATE int sqlite3BtreeOpen(
btree_open_out:
if( rc!=SQLITE_OK ){
if( pBt && pBt->pPager ){
- sqlite3PagerClose(pBt->pPager);
+ sqlite3PagerClose(pBt->pPager, 0);
}
sqlite3_free(pBt);
sqlite3_free(p);
*ppBtree = 0;
}else{
+ sqlite3_file *pFile;
+
/* If the B-Tree was successfully opened, set the pager-cache size to the
** default value. Except, when opening on an existing shared pager-cache,
** do not change the pager-cache size.
@@ -60652,6 +61275,11 @@ btree_open_out:
if( sqlite3BtreeSchema(p, 0, 0)==0 ){
sqlite3PagerSetCachesize(p->pBt->pPager, SQLITE_DEFAULT_CACHE_SIZE);
}
+
+ pFile = sqlite3PagerFile(pBt->pPager);
+ if( pFile->pMethods ){
+ sqlite3OsFileControlHint(pFile, SQLITE_FCNTL_PDB, (void*)&pBt->db);
+ }
}
if( mutexOpen ){
assert( sqlite3_mutex_held(mutexOpen) );
@@ -60781,7 +61409,7 @@ SQLITE_PRIVATE int sqlite3BtreeClose(Btree *p){
** Clean out and delete the BtShared object.
*/
assert( !pBt->pCursor );
- sqlite3PagerClose(pBt->pPager);
+ sqlite3PagerClose(pBt->pPager, p->db);
if( pBt->xFreeSchema && pBt->pSchema ){
pBt->xFreeSchema(pBt->pSchema);
}
@@ -61528,14 +62156,11 @@ static int setChildPtrmaps(MemPage *pPage){
int nCell; /* Number of cells in page pPage */
int rc; /* Return code */
BtShared *pBt = pPage->pBt;
- u8 isInitOrig = pPage->isInit;
Pgno pgno = pPage->pgno;
assert( sqlite3_mutex_held(pPage->pBt->mutex) );
rc = btreeInitPage(pPage);
- if( rc!=SQLITE_OK ){
- goto set_child_ptrmaps_out;
- }
+ if( rc!=SQLITE_OK ) return rc;
nCell = pPage->nCell;
for(i=0; i<nCell; i++){
@@ -61554,8 +62179,6 @@ static int setChildPtrmaps(MemPage *pPage){
ptrmapPut(pBt, childPgno, PTRMAP_BTREE, pgno, &rc);
}
-set_child_ptrmaps_out:
- pPage->isInit = isInitOrig;
return rc;
}
@@ -61583,7 +62206,6 @@ static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){
}
put4byte(pPage->aData, iTo);
}else{
- u8 isInitOrig = pPage->isInit;
int i;
int nCell;
int rc;
@@ -61597,12 +62219,14 @@ static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){
if( eType==PTRMAP_OVERFLOW1 ){
CellInfo info;
pPage->xParseCell(pPage, pCell, &info);
- if( info.nLocal<info.nPayload
- && pCell+info.nSize-1<=pPage->aData+pPage->maskPage
- && iFrom==get4byte(pCell+info.nSize-4)
- ){
- put4byte(pCell+info.nSize-4, iTo);
- break;
+ if( info.nLocal<info.nPayload ){
+ if( pCell+info.nSize > pPage->aData+pPage->pBt->usableSize ){
+ return SQLITE_CORRUPT_BKPT;
+ }
+ if( iFrom==get4byte(pCell+info.nSize-4) ){
+ put4byte(pCell+info.nSize-4, iTo);
+ break;
+ }
}
}else{
if( get4byte(pCell)==iFrom ){
@@ -61619,8 +62243,6 @@ static int modifyPagePointer(MemPage *pPage, Pgno iFrom, Pgno iTo, u8 eType){
}
put4byte(&pPage->aData[pPage->hdrOffset+8], iTo);
}
-
- pPage->isInit = isInitOrig;
}
return SQLITE_OK;
}
@@ -62279,7 +62901,12 @@ SQLITE_PRIVATE int sqlite3BtreeSavepoint(Btree *p, int op, int iSavepoint){
assert( op==SAVEPOINT_RELEASE || op==SAVEPOINT_ROLLBACK );
assert( iSavepoint>=0 || (iSavepoint==-1 && op==SAVEPOINT_ROLLBACK) );
sqlite3BtreeEnter(p);
- rc = sqlite3PagerSavepoint(pBt->pPager, op, iSavepoint);
+ if( op==SAVEPOINT_ROLLBACK ){
+ rc = saveAllCursors(pBt, 0, 0);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerSavepoint(pBt->pPager, op, iSavepoint);
+ }
if( rc==SQLITE_OK ){
if( iSavepoint<0 && (pBt->btsFlags & BTS_INITIALLY_EMPTY)!=0 ){
pBt->nPage = 0;
@@ -62515,6 +63142,10 @@ SQLITE_PRIVATE int sqlite3BtreeCursorIsValid(BtCursor *pCur){
return pCur && pCur->eState==CURSOR_VALID;
}
#endif /* NDEBUG */
+SQLITE_PRIVATE int sqlite3BtreeCursorIsValidNN(BtCursor *pCur){
+ assert( pCur!=0 );
+ return pCur->eState==CURSOR_VALID;
+}
/*
** Return the value of the integer key or "rowid" for a table btree.
@@ -62661,7 +63292,6 @@ static int copyPayload(
**
** 0: The operation is a read. Populate the overflow cache.
** 1: The operation is a write. Populate the overflow cache.
-** 2: The operation is a read. Do not populate the overflow cache.
**
** A total of "amt" bytes are read or written beginning at "offset".
** Data is read to or from the buffer pBuf.
@@ -62669,13 +63299,13 @@ static int copyPayload(
** The content being read or written might appear on the main page
** or be scattered out on multiple overflow pages.
**
-** If the current cursor entry uses one or more overflow pages and the
-** eOp argument is not 2, this function may allocate space for and lazily
-** populates the overflow page-list cache array (BtCursor.aOverflow).
+** If the current cursor entry uses one or more overflow pages
+** this function may allocate space for and lazily populate
+** the overflow page-list cache array (BtCursor.aOverflow).
** Subsequent calls use this cache to make seeking to the supplied offset
** more efficient.
**
-** Once an overflow page-list cache has been allocated, it may be
+** Once an overflow page-list cache has been allocated, it must be
** invalidated if some other cursor writes to the same table, or if
** the cursor is moved to a different row. Additionally, in auto-vacuum
** mode, the following events may invalidate an overflow page-list cache.
@@ -62697,21 +63327,17 @@ static int accessPayload(
MemPage *pPage = pCur->apPage[pCur->iPage]; /* Btree page of current entry */
BtShared *pBt = pCur->pBt; /* Btree this cursor belongs to */
#ifdef SQLITE_DIRECT_OVERFLOW_READ
- unsigned char * const pBufStart = pBuf;
- int bEnd; /* True if reading to end of data */
+ unsigned char * const pBufStart = pBuf; /* Start of original out buffer */
#endif
assert( pPage );
+ assert( eOp==0 || eOp==1 );
assert( pCur->eState==CURSOR_VALID );
assert( pCur->aiIdx[pCur->iPage]<pPage->nCell );
assert( cursorHoldsMutex(pCur) );
- assert( eOp!=2 || offset==0 ); /* Always start from beginning for eOp==2 */
getCellInfo(pCur);
aPayload = pCur->info.pPayload;
-#ifdef SQLITE_DIRECT_OVERFLOW_READ
- bEnd = offset+amt==pCur->info.nPayload;
-#endif
assert( offset+amt <= pCur->info.nPayload );
assert( aPayload > pPage->aData );
@@ -62730,7 +63356,7 @@ static int accessPayload(
if( a+offset>pCur->info.nLocal ){
a = pCur->info.nLocal - offset;
}
- rc = copyPayload(&aPayload[offset], pBuf, a, (eOp & 0x01), pPage->pDbPage);
+ rc = copyPayload(&aPayload[offset], pBuf, a, eOp, pPage->pDbPage);
offset = 0;
pBuf += a;
amt -= a;
@@ -62746,53 +63372,46 @@ static int accessPayload(
nextPage = get4byte(&aPayload[pCur->info.nLocal]);
/* If the BtCursor.aOverflow[] has not been allocated, allocate it now.
- ** Except, do not allocate aOverflow[] for eOp==2.
**
** The aOverflow[] array is sized at one entry for each overflow page
** in the overflow chain. The page number of the first overflow page is
** stored in aOverflow[0], etc. A value of 0 in the aOverflow[] array
** means "not yet known" (the cache is lazily populated).
*/
- if( eOp!=2 && (pCur->curFlags & BTCF_ValidOvfl)==0 ){
+ if( (pCur->curFlags & BTCF_ValidOvfl)==0 ){
int nOvfl = (pCur->info.nPayload-pCur->info.nLocal+ovflSize-1)/ovflSize;
if( nOvfl>pCur->nOvflAlloc ){
Pgno *aNew = (Pgno*)sqlite3Realloc(
pCur->aOverflow, nOvfl*2*sizeof(Pgno)
);
if( aNew==0 ){
- rc = SQLITE_NOMEM_BKPT;
+ return SQLITE_NOMEM_BKPT;
}else{
pCur->nOvflAlloc = nOvfl*2;
pCur->aOverflow = aNew;
}
}
- if( rc==SQLITE_OK ){
- memset(pCur->aOverflow, 0, nOvfl*sizeof(Pgno));
- pCur->curFlags |= BTCF_ValidOvfl;
+ memset(pCur->aOverflow, 0, nOvfl*sizeof(Pgno));
+ pCur->curFlags |= BTCF_ValidOvfl;
+ }else{
+ /* If the overflow page-list cache has been allocated and the
+ ** entry for the first required overflow page is valid, skip
+ ** directly to it.
+ */
+ if( pCur->aOverflow[offset/ovflSize] ){
+ iIdx = (offset/ovflSize);
+ nextPage = pCur->aOverflow[iIdx];
+ offset = (offset%ovflSize);
}
}
- /* If the overflow page-list cache has been allocated and the
- ** entry for the first required overflow page is valid, skip
- ** directly to it.
- */
- if( (pCur->curFlags & BTCF_ValidOvfl)!=0
- && pCur->aOverflow[offset/ovflSize]
- ){
- iIdx = (offset/ovflSize);
- nextPage = pCur->aOverflow[iIdx];
- offset = (offset%ovflSize);
- }
-
- for( ; rc==SQLITE_OK && amt>0 && nextPage; iIdx++){
-
+ assert( rc==SQLITE_OK && amt>0 );
+ while( nextPage ){
/* If required, populate the overflow page-list cache. */
- if( (pCur->curFlags & BTCF_ValidOvfl)!=0 ){
- assert( pCur->aOverflow[iIdx]==0
- || pCur->aOverflow[iIdx]==nextPage
- || CORRUPT_DB );
- pCur->aOverflow[iIdx] = nextPage;
- }
+ assert( pCur->aOverflow[iIdx]==0
+ || pCur->aOverflow[iIdx]==nextPage
+ || CORRUPT_DB );
+ pCur->aOverflow[iIdx] = nextPage;
if( offset>=ovflSize ){
/* The only reason to read this page is to obtain the page
@@ -62800,11 +63419,7 @@ static int accessPayload(
** data is not required. So first try to lookup the overflow
** page-list cache, if any, then fall back to the getOverflowPage()
** function.
- **
- ** Note that the aOverflow[] array must be allocated because eOp!=2
- ** here. If eOp==2, then offset==0 and this branch is never taken.
*/
- assert( eOp!=2 );
assert( pCur->curFlags & BTCF_ValidOvfl );
assert( pCur->pBtree->db==pBt->db );
if( pCur->aOverflow[iIdx+1] ){
@@ -62818,7 +63433,7 @@ static int accessPayload(
** range of data that is being read (eOp==0) or written (eOp!=0).
*/
#ifdef SQLITE_DIRECT_OVERFLOW_READ
- sqlite3_file *fd;
+ sqlite3_file *fd; /* File from which to do direct overflow read */
#endif
int a = amt;
if( a + offset > ovflSize ){
@@ -62830,27 +63445,25 @@ static int accessPayload(
**
** 1) this is a read operation, and
** 2) data is required from the start of this overflow page, and
- ** 3) the database is file-backed, and
- ** 4) there is no open write-transaction, and
- ** 5) the database is not a WAL database,
- ** 6) all data from the page is being read.
- ** 7) at least 4 bytes have already been read into the output buffer
+ ** 3) there is no open write-transaction, and
+ ** 4) the database is file-backed, and
+ ** 5) the page is not in the WAL file
+ ** 6) at least 4 bytes have already been read into the output buffer
**
** then data can be read directly from the database file into the
** output buffer, bypassing the page-cache altogether. This speeds
** up loading large records that span many overflow pages.
*/
- if( (eOp&0x01)==0 /* (1) */
+ if( eOp==0 /* (1) */
&& offset==0 /* (2) */
- && (bEnd || a==ovflSize) /* (6) */
- && pBt->inTransaction==TRANS_READ /* (4) */
- && (fd = sqlite3PagerFile(pBt->pPager))->pMethods /* (3) */
- && 0==sqlite3PagerUseWal(pBt->pPager) /* (5) */
- && &pBuf[-4]>=pBufStart /* (7) */
+ && pBt->inTransaction==TRANS_READ /* (3) */
+ && (fd = sqlite3PagerFile(pBt->pPager))->pMethods /* (4) */
+ && 0==sqlite3PagerUseWal(pBt->pPager, nextPage) /* (5) */
+ && &pBuf[-4]>=pBufStart /* (6) */
){
u8 aSave[4];
u8 *aWrite = &pBuf[-4];
- assert( aWrite>=pBufStart ); /* hence (7) */
+ assert( aWrite>=pBufStart ); /* due to (6) */
memcpy(aSave, aWrite, 4);
rc = sqlite3OsRead(fd, aWrite, a+4, (i64)pBt->pageSize*(nextPage-1));
nextPage = get4byte(aWrite);
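/* Standalone toy of the save/overwrite/restore trick used by the
** SQLITE_DIRECT_OVERFLOW_READ branch above. Because at least 4 output
** bytes have already been written (condition 6), the 4 bytes just before
** the current output position can be borrowed: one read pulls the 4-byte
** next-page pointer plus the payload straight into the caller's buffer,
** the pointer is decoded, and the borrowed bytes are restored. The 8-byte
** page format here is invented for the sketch. */
#include <string.h>
#include <stdio.h>

#define PAGESZ 8      /* 4-byte next-page pointer + 4 payload bytes */

static unsigned get4(const unsigned char *p){
  return ((unsigned)p[0]<<24)|((unsigned)p[1]<<16)|((unsigned)p[2]<<8)|p[3];
}

/* Copy the payload of overflow page pgno (1-based) into pBuf and return
** the next overflow page number. pBuf-4 must be writable scratch space. */
static unsigned readOvflDirect(const unsigned char *disk, unsigned pgno,
                               unsigned char *pBuf){
  unsigned char aSave[4];
  unsigned char *aWrite = pBuf - 4;
  unsigned next;
  memcpy(aSave, aWrite, 4);                        /* save the clobbered bytes */
  memcpy(aWrite, &disk[(pgno-1)*PAGESZ], PAGESZ);  /* one read, no scratch page */
  next = get4(aWrite);                             /* harvest the pointer */
  memcpy(aWrite, aSave, 4);                        /* put the output back */
  return next;
}

int main(void){
  /* page 1: next=2, payload "ABCD"; page 2: next=0 (end), payload "EFGH" */
  static const unsigned char disk[2*PAGESZ] = {
    0,0,0,2, 'A','B','C','D',  0,0,0,0, 'E','F','G','H'
  };
  unsigned char out[12] = {0};
  unsigned pg = 1, i = 4;                          /* keep 4 bytes of slack */
  while( pg ){ pg = readOvflDirect(disk, pg, &out[i]); i += 4; }
  printf("%.8s\n", (char*)&out[4]);                /* prints ABCDEFGH */
  return 0;
}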
@@ -62861,41 +63474,49 @@ static int accessPayload(
{
DbPage *pDbPage;
rc = sqlite3PagerGet(pBt->pPager, nextPage, &pDbPage,
- ((eOp&0x01)==0 ? PAGER_GET_READONLY : 0)
+ (eOp==0 ? PAGER_GET_READONLY : 0)
);
if( rc==SQLITE_OK ){
aPayload = sqlite3PagerGetData(pDbPage);
nextPage = get4byte(aPayload);
- rc = copyPayload(&aPayload[offset+4], pBuf, a, (eOp&0x01), pDbPage);
+ rc = copyPayload(&aPayload[offset+4], pBuf, a, eOp, pDbPage);
sqlite3PagerUnref(pDbPage);
offset = 0;
}
}
amt -= a;
+ if( amt==0 ) return rc;
pBuf += a;
}
+ if( rc ) break;
+ iIdx++;
}
}
if( rc==SQLITE_OK && amt>0 ){
- return SQLITE_CORRUPT_BKPT;
+ return SQLITE_CORRUPT_BKPT; /* Overflow chain ends prematurely */
}
return rc;
}
/*
-** Read part of the key associated with cursor pCur. Exactly
-** "amt" bytes will be transferred into pBuf[]. The transfer
+** Read part of the payload for the row at which cursor pCur is currently
+** pointing. "amt" bytes will be transferred into pBuf[]. The transfer
** begins at "offset".
**
-** The caller must ensure that pCur is pointing to a valid row
-** in the table.
+** pCur can be pointing to either a table or an index b-tree.
+** If pointing to a table btree, then the content section is read. If
+** pCur is pointing to an index b-tree then the key section is read.
+**
+** For sqlite3BtreePayload(), the caller must ensure that pCur is pointing
+** to a valid row in the table. For sqlite3BtreePayloadChecked(), the
+** cursor might be invalid or might need to be restored before being read.
**
** Return SQLITE_OK on success or an error code if anything goes
** wrong. An error is returned if "offset+amt" is larger than
** the available payload.
*/
-SQLITE_PRIVATE int sqlite3BtreeKey(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){
+SQLITE_PRIVATE int sqlite3BtreePayload(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){
assert( cursorHoldsMutex(pCur) );
assert( pCur->eState==CURSOR_VALID );
assert( pCur->iPage>=0 && pCur->apPage[pCur->iPage] );
@@ -62904,33 +63525,34 @@ SQLITE_PRIVATE int sqlite3BtreeKey(BtCursor *pCur, u32 offset, u32 amt, void *pB
}
/*
-** Read part of the data associated with cursor pCur. Exactly
-** "amt" bytes will be transfered into pBuf[]. The transfer
-** begins at "offset".
-**
-** Return SQLITE_OK on success or an error code if anything goes
-** wrong. An error is returned if "offset+amt" is larger than
-** the available payload.
+** This variant of sqlite3BtreePayload() works even if the cursor is not
+** in the CURSOR_VALID state. It is only used by the sqlite3_blob_read()
+** interface.
*/
-SQLITE_PRIVATE int sqlite3BtreeData(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){
- int rc;
-
#ifndef SQLITE_OMIT_INCRBLOB
+static SQLITE_NOINLINE int accessPayloadChecked(
+ BtCursor *pCur,
+ u32 offset,
+ u32 amt,
+ void *pBuf
+){
+ int rc;
if ( pCur->eState==CURSOR_INVALID ){
return SQLITE_ABORT;
}
-#endif
-
assert( cursorOwnsBtShared(pCur) );
- rc = restoreCursorPosition(pCur);
- if( rc==SQLITE_OK ){
- assert( pCur->eState==CURSOR_VALID );
- assert( pCur->iPage>=0 && pCur->apPage[pCur->iPage] );
- assert( pCur->aiIdx[pCur->iPage]<pCur->apPage[pCur->iPage]->nCell );
- rc = accessPayload(pCur, offset, amt, pBuf, 0);
+ rc = btreeRestoreCursorPosition(pCur);
+ return rc ? rc : accessPayload(pCur, offset, amt, pBuf, 0);
+}
+SQLITE_PRIVATE int sqlite3BtreePayloadChecked(BtCursor *pCur, u32 offset, u32 amt, void *pBuf){
+ if( pCur->eState==CURSOR_VALID ){
+ assert( cursorOwnsBtShared(pCur) );
+ return accessPayload(pCur, offset, amt, pBuf, 0);
+ }else{
+ return accessPayloadChecked(pCur, offset, amt, pBuf);
}
- return rc;
}
+#endif /* SQLITE_OMIT_INCRBLOB */
/*
** Return a pointer to payload information from the entry that the
@@ -63101,9 +63723,12 @@ static int moveToRoot(BtCursor *pCur){
}
if( pCur->iPage>=0 ){
- while( pCur->iPage ){
- assert( pCur->apPage[pCur->iPage]!=0 );
- releasePageNotNull(pCur->apPage[pCur->iPage--]);
+ if( pCur->iPage ){
+ do{
+ assert( pCur->apPage[pCur->iPage]!=0 );
+ releasePageNotNull(pCur->apPage[pCur->iPage--]);
+ }while( pCur->iPage);
+ goto skip_init;
}
}else if( pCur->pgnoRoot==0 ){
pCur->eState = CURSOR_INVALID;
@@ -63114,7 +63739,7 @@ static int moveToRoot(BtCursor *pCur){
0, pCur->curPagerFlags);
if( rc!=SQLITE_OK ){
pCur->eState = CURSOR_INVALID;
- return rc;
+ return rc;
}
pCur->iPage = 0;
pCur->curIntKey = pCur->apPage[0]->intKey;
@@ -63137,10 +63762,12 @@ static int moveToRoot(BtCursor *pCur){
return SQLITE_CORRUPT_BKPT;
}
+skip_init:
pCur->aiIdx[0] = 0;
pCur->info.nSize = 0;
pCur->curFlags &= ~(BTCF_AtLast|BTCF_ValidNKey|BTCF_ValidOvfl);
+ pRoot = pCur->apPage[0];
if( pRoot->nCell>0 ){
pCur->eState = CURSOR_VALID;
}else if( !pRoot->leaf ){
@@ -63329,9 +63956,26 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
*pRes = 0;
return SQLITE_OK;
}
- if( (pCur->curFlags & BTCF_AtLast)!=0 && pCur->info.nKey<intKey ){
- *pRes = -1;
- return SQLITE_OK;
+ if( pCur->info.nKey<intKey ){
+ if( (pCur->curFlags & BTCF_AtLast)!=0 ){
+ *pRes = -1;
+ return SQLITE_OK;
+ }
+ /* If the requested key is one more than the previous key, then
+ ** try to get there using sqlite3BtreeNext() rather than a full
+ ** binary search. This is an optimization only. The correct answer
+      ** is still obtained without this case, only a little more slowly */
+ if( pCur->info.nKey+1==intKey && !pCur->skipNext ){
+ *pRes = 0;
+ rc = sqlite3BtreeNext(pCur, pRes);
+ if( rc ) return rc;
+ if( *pRes==0 ){
+ getCellInfo(pCur);
+ if( pCur->info.nKey==intKey ){
+ return SQLITE_OK;
+ }
+ }
+ }
}
}
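/* Toy model (array-backed cursor invented here, not SQLite's b-tree) of the
** shortcut added above: when the key being sought is exactly one greater
** than the key under the cursor, a single step forward is tried before
** falling back to a full binary search. */
#include <stdio.h>

typedef struct { const int *a; int n; int i; } ToyCur;   /* sorted, no dups */

static int toySeek(ToyCur *c, int key){
  int lo = 0, hi = c->n-1;
  if( c->i>=0 && c->i<c->n && c->a[c->i]+1==key ){       /* likely next row */
    if( c->i+1<c->n && c->a[c->i+1]==key ){ c->i++; return 0; }
  }
  while( lo<=hi ){                                       /* normal path */
    int mid = (lo+hi)/2;
    if( c->a[mid]==key ){ c->i = mid; return 0; }
    if( c->a[mid]<key ) lo = mid+1; else hi = mid-1;
  }
  return 1;                                              /* not found */
}

int main(void){
  static const int rowid[] = {1, 2, 3, 5, 8};
  ToyCur c = { rowid, 5, 1 };              /* cursor on key 2 */
  printf("%d %d\n", toySeek(&c, 3), c.i);  /* shortcut hits: prints 0 2 */
  printf("%d %d\n", toySeek(&c, 8), c.i);  /* falls back:    prints 0 4 */
  return 0;
}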
@@ -63397,16 +64041,16 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
if( lwr>upr ){ c = +1; break; }
}else{
assert( nCellKey==intKey );
- pCur->curFlags |= BTCF_ValidNKey;
- pCur->info.nKey = nCellKey;
pCur->aiIdx[pCur->iPage] = (u16)idx;
if( !pPage->leaf ){
lwr = idx;
goto moveto_next_layer;
}else{
+ pCur->curFlags |= BTCF_ValidNKey;
+ pCur->info.nKey = nCellKey;
+ pCur->info.nSize = 0;
*pRes = 0;
- rc = SQLITE_OK;
- goto moveto_finish;
+ return SQLITE_OK;
}
}
assert( lwr+upr>=0 );
@@ -63467,7 +64111,8 @@ SQLITE_PRIVATE int sqlite3BtreeMovetoUnpacked(
goto moveto_finish;
}
pCur->aiIdx[pCur->iPage] = (u16)idx;
- rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 2);
+ rc = accessPayload(pCur, 0, nCell, (unsigned char*)pCellKey, 0);
+ pCur->curFlags &= ~BTCF_ValidOvfl;
if( rc ){
sqlite3_free(pCellKey);
goto moveto_finish;
@@ -63517,7 +64162,7 @@ moveto_next_layer:
}
moveto_finish:
pCur->info.nSize = 0;
- pCur->curFlags &= ~(BTCF_ValidNKey|BTCF_ValidOvfl);
+ assert( (pCur->curFlags & BTCF_ValidOvfl)==0 );
return rc;
}
@@ -63715,7 +64360,7 @@ static SQLITE_NOINLINE int btreePrevious(BtCursor *pCur, int *pRes){
moveToParent(pCur);
}
assert( pCur->info.nSize==0 );
- assert( (pCur->curFlags & (BTCF_ValidNKey|BTCF_ValidOvfl))==0 );
+ assert( (pCur->curFlags & (BTCF_ValidOvfl))==0 );
pCur->aiIdx[pCur->iPage]--;
pPage = pCur->apPage[pCur->iPage];
@@ -64231,30 +64876,28 @@ static void freePage(MemPage *pPage, int *pRC){
static int clearCell(
MemPage *pPage, /* The page that contains the Cell */
unsigned char *pCell, /* First byte of the Cell */
- u16 *pnSize /* Write the size of the Cell here */
+ CellInfo *pInfo /* Size information about the cell */
){
BtShared *pBt = pPage->pBt;
- CellInfo info;
Pgno ovflPgno;
int rc;
int nOvfl;
u32 ovflPageSize;
assert( sqlite3_mutex_held(pPage->pBt->mutex) );
- pPage->xParseCell(pPage, pCell, &info);
- *pnSize = info.nSize;
- if( info.nLocal==info.nPayload ){
+ pPage->xParseCell(pPage, pCell, pInfo);
+ if( pInfo->nLocal==pInfo->nPayload ){
return SQLITE_OK; /* No overflow pages. Return without doing anything */
}
- if( pCell+info.nSize-1 > pPage->aData+pPage->maskPage ){
+ if( pCell+pInfo->nSize-1 > pPage->aData+pPage->maskPage ){
return SQLITE_CORRUPT_BKPT; /* Cell extends past end of page */
}
- ovflPgno = get4byte(pCell + info.nSize - 4);
+ ovflPgno = get4byte(pCell + pInfo->nSize - 4);
assert( pBt->usableSize > 4 );
ovflPageSize = pBt->usableSize - 4;
- nOvfl = (info.nPayload - info.nLocal + ovflPageSize - 1)/ovflPageSize;
+ nOvfl = (pInfo->nPayload - pInfo->nLocal + ovflPageSize - 1)/ovflPageSize;
assert( nOvfl>0 ||
- (CORRUPT_DB && (info.nPayload + ovflPageSize)<ovflPageSize)
+ (CORRUPT_DB && (pInfo->nPayload + ovflPageSize)<ovflPageSize)
);
while( nOvfl-- ){
Pgno iNext = 0;
@@ -64494,7 +65137,6 @@ static void dropCell(MemPage *pPage, int idx, int sz, int *pRC){
int hdr; /* Beginning of the header. 0 most pages. 100 page 1 */
if( *pRC ) return;
-
assert( idx>=0 && idx<pPage->nCell );
assert( CORRUPT_DB || sz==cellSize(pPage, idx) );
assert( sqlite3PagerIswriteable(pPage->pDbPage) );
@@ -64578,7 +65220,10 @@ static void insertCell(
put4byte(pCell, iChild);
}
j = pPage->nOverflow++;
- assert( j<(int)(sizeof(pPage->apOvfl)/sizeof(pPage->apOvfl[0])) );
+ /* Comparison against ArraySize-1 since we hold back one extra slot
+    ** as a contingency. In other words, we never need more than 3 overflow
+ ** slots but 4 are allocated, just to be safe. */
+ assert( j < ArraySize(pPage->apOvfl)-1 );
pPage->apOvfl[j] = pCell;
pPage->aiOvfl[j] = (u16)i;
@@ -65318,7 +65963,7 @@ static int balance_nonroot(
nMaxCells += 1+apOld[i]->nCell+apOld[i]->nOverflow;
if( (i--)==0 ) break;
- if( i+nxDiv==pParent->aiOvfl[0] && pParent->nOverflow ){
+ if( pParent->nOverflow && i+nxDiv==pParent->aiOvfl[0] ){
apDiv[i] = pParent->apOvfl[0];
pgno = get4byte(apDiv[i]);
szNew[i] = pParent->xCellSize(pParent, apDiv[i]);
@@ -65510,7 +66155,6 @@ static int balance_nonroot(
for(i=0; i<nOld; i++){
MemPage *p = apOld[i];
szNew[i] = usableSpace - p->nFree;
- if( szNew[i]<0 ){ rc = SQLITE_CORRUPT_BKPT; goto balance_cleanup; }
for(j=0; j<p->nOverflow; j++){
szNew[i] += 2 + p->xCellSize(p, p->apOvfl[j]);
}
@@ -66172,22 +66816,24 @@ static int balance(BtCursor *pCur){
** pX.pData,nData,nZero fields must be zero.
**
** If the seekResult parameter is non-zero, then a successful call to
-** MovetoUnpacked() to seek cursor pCur to (pKey, nKey) has already
-** been performed. seekResult is the search result returned (a negative
-** number if pCur points at an entry that is smaller than (pKey, nKey), or
-** a positive value if pCur points at an entry that is larger than
-** (pKey, nKey)).
-**
-** If the seekResult parameter is non-zero, then the caller guarantees that
-** cursor pCur is pointing at the existing copy of a row that is to be
-** overwritten. If the seekResult parameter is 0, then cursor pCur may
-** point to any entry or to no entry at all and so this function has to seek
-** the cursor before the new key can be inserted.
+** MovetoUnpacked() to seek cursor pCur to (pKey,nKey) has already
+** been performed. In other words, if seekResult!=0 then the cursor
+** is currently pointing to a cell that will be adjacent to the cell
+** to be inserted. If seekResult<0 then pCur points to a cell that is
+** smaller than (pKey,nKey). If seekResult>0 then pCur points to a cell
+** that is larger than (pKey,nKey).
+**
+** If seekResult==0, that means pCur is pointing at some unknown location.
+** In that case, this routine must seek the cursor to the correct insertion
+** point for (pKey,nKey) before doing the insertion. For index btrees,
+** if pX->nMem is non-zero, then pX->aMem contains pointers to the unpacked
+** key values and pX->aMem can be used instead of pX->pKey to avoid having
+** to decode the key.
*/
SQLITE_PRIVATE int sqlite3BtreeInsert(
BtCursor *pCur, /* Insert data into the table of this cursor */
const BtreePayload *pX, /* Content of the row to be inserted */
- int appendBias, /* True if this is likely an append */
+ int flags, /* Bitmask of BTREE_SAVEPOSITION and BTREE_APPEND flags */
int seekResult /* Result of prior MovetoUnpacked() call */
){
int rc;
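
As an aside, the contract documented above can be illustrated with a minimal caller-side sketch. It uses only the BtreePayload fields and BTREE_* flags visible in this diff; the helper name is hypothetical, and the internal types (BtCursor, BtreePayload, Mem, i64, u16) are assumed to come from this amalgamation.

/* Hypothetical sketch: insert an index entry, supplying both the serialized
** key and the already-unpacked column values so that the btree layer can
** skip decoding the key (the pX->nMem/aMem optimization described above). */
static int exampleIndexInsert(
  BtCursor *pCur,        /* Open write cursor on the index btree */
  const void *pKey,      /* Serialized index key */
  i64 nKey,              /* Number of bytes in pKey */
  Mem *aCols,            /* Unpacked key values (optional optimization) */
  u16 nCols              /* Number of entries in aCols[] */
){
  BtreePayload x;
  memset(&x, 0, sizeof(x));
  x.pKey = pKey;
  x.nKey = nKey;
  x.aMem = aCols;
  x.nMem = nCols;
  /* No prior seek has been done, so seekResult==0 and no flags are set;
  ** sqlite3BtreeInsert() will position the cursor itself before inserting. */
  return sqlite3BtreeInsert(pCur, &x, 0, 0);
}
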
@@ -66200,6 +66846,8 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
unsigned char *oldCell;
unsigned char *newCell = 0;
+ assert( (flags & (BTREE_SAVEPOSITION|BTREE_APPEND))==flags );
+
if( pCur->eState==CURSOR_FAULT ){
assert( pCur->skipNext!=SQLITE_OK );
return pCur->skipNext;
@@ -66240,18 +66888,38 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
** cursors open on the row being replaced */
invalidateIncrblobCursors(p, pX->nKey, 0);
+ /* If BTREE_SAVEPOSITION is set, the cursor must already be pointing
+ ** to a row with the same key as the new entry being inserted. */
+ assert( (flags & BTREE_SAVEPOSITION)==0 ||
+ ((pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey==pCur->info.nKey) );
+
/* If the cursor is currently on the last row and we are appending a
** new row onto the end, set the "loc" to avoid an unnecessary
** btreeMoveto() call */
- if( (pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey>0
- && pCur->info.nKey==pX->nKey-1 ){
- loc = -1;
+ if( (pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey==pCur->info.nKey ){
+ loc = 0;
+ }else if( (pCur->curFlags&BTCF_ValidNKey)!=0 && pX->nKey>0
+ && pCur->info.nKey==pX->nKey-1 ){
+ loc = -1;
}else if( loc==0 ){
- rc = sqlite3BtreeMovetoUnpacked(pCur, 0, pX->nKey, appendBias, &loc);
+ rc = sqlite3BtreeMovetoUnpacked(pCur, 0, pX->nKey, flags!=0, &loc);
if( rc ) return rc;
}
- }else if( loc==0 ){
- rc = btreeMoveto(pCur, pX->pKey, pX->nKey, appendBias, &loc);
+ }else if( loc==0 && (flags & BTREE_SAVEPOSITION)==0 ){
+ if( pX->nMem ){
+ UnpackedRecord r;
+ r.pKeyInfo = pCur->pKeyInfo;
+ r.aMem = pX->aMem;
+ r.nField = pX->nMem;
+ r.default_rc = 0;
+ r.errCode = 0;
+ r.r1 = 0;
+ r.r2 = 0;
+ r.eqSeen = 0;
+ rc = sqlite3BtreeMovetoUnpacked(pCur, &r, 0, flags!=0, &loc);
+ }else{
+ rc = btreeMoveto(pCur, pX->pKey, pX->nKey, flags!=0, &loc);
+ }
if( rc ) return rc;
}
assert( pCur->eState==CURSOR_VALID || (pCur->eState==CURSOR_INVALID && loc) );
@@ -66272,7 +66940,7 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
assert( szNew <= MX_CELL_SIZE(pBt) );
idx = pCur->aiIdx[pCur->iPage];
if( loc==0 ){
- u16 szOld;
+ CellInfo info;
assert( idx<pPage->nCell );
rc = sqlite3PagerWrite(pPage->pDbPage);
if( rc ){
@@ -66282,8 +66950,19 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
if( !pPage->leaf ){
memcpy(newCell, oldCell, 4);
}
- rc = clearCell(pPage, oldCell, &szOld);
- dropCell(pPage, idx, szOld, &rc);
+ rc = clearCell(pPage, oldCell, &info);
+ if( info.nSize==szNew && info.nLocal==info.nPayload ){
+ /* Overwrite the old cell with the new if they are the same size.
+ ** We could also try to do this if the old cell is smaller, then add
+ ** the leftover space to the free list. But experiments show that
+ ** doing that is no faster than skipping this optimization and just
+ ** calling dropCell() and insertCell(). */
+ assert( rc==SQLITE_OK ); /* clearCell never fails when nLocal==nPayload */
+ if( oldCell+szNew > pPage->aDataEnd ) return SQLITE_CORRUPT_BKPT;
+ memcpy(oldCell, newCell, szNew);
+ return SQLITE_OK;
+ }
+ dropCell(pPage, idx, info.nSize, &rc);
if( rc ) goto end_insert;
}else if( loc<0 && pPage->nCell>0 ){
assert( pPage->leaf );
@@ -66327,6 +67006,20 @@ SQLITE_PRIVATE int sqlite3BtreeInsert(
** from trying to save the current position of the cursor. */
pCur->apPage[pCur->iPage]->nOverflow = 0;
pCur->eState = CURSOR_INVALID;
+ if( (flags & BTREE_SAVEPOSITION) && rc==SQLITE_OK ){
+ rc = moveToRoot(pCur);
+ if( pCur->pKeyInfo ){
+ assert( pCur->pKey==0 );
+ pCur->pKey = sqlite3Malloc( pX->nKey );
+ if( pCur->pKey==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memcpy(pCur->pKey, pX->pKey, pX->nKey);
+ }
+ }
+ pCur->eState = CURSOR_REQUIRESEEK;
+ pCur->nKey = pX->nKey;
+ }
}
assert( pCur->apPage[pCur->iPage]->nOverflow==0 );
@@ -66359,7 +67052,7 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){
unsigned char *pCell; /* Pointer to cell to delete */
int iCellIdx; /* Index of cell to delete */
int iCellDepth; /* Depth of node containing pCell */
- u16 szCell; /* Size of the cell being deleted */
+ CellInfo info; /* Size information about the cell being deleted */
int bSkipnext = 0; /* Leaf cursor in SKIPNEXT state */
u8 bPreserve = flags & BTREE_SAVEPOSITION; /* Keep cursor valid */
@@ -66431,8 +67124,8 @@ SQLITE_PRIVATE int sqlite3BtreeDelete(BtCursor *pCur, u8 flags){
** itself from within the page. */
rc = sqlite3PagerWrite(pPage->pDbPage);
if( rc ) return rc;
- rc = clearCell(pPage, pCell, &szCell);
- dropCell(pPage, iCellIdx, szCell, &rc);
+ rc = clearCell(pPage, pCell, &info);
+ dropCell(pPage, iCellIdx, info.nSize, &rc);
if( rc ) return rc;
/* If the cell deleted was not located on a leaf page, then the cursor
@@ -66682,7 +67375,7 @@ static int clearDatabasePage(
unsigned char *pCell;
int i;
int hdr;
- u16 szCell;
+ CellInfo info;
assert( sqlite3_mutex_held(pBt->mutex) );
if( pgno>btreePagecount(pBt) ){
@@ -66702,7 +67395,7 @@ static int clearDatabasePage(
rc = clearDatabasePage(pBt, get4byte(pCell), 1, pnChange);
if( rc ) goto cleardatabasepage_out;
}
- rc = clearCell(pPage, pCell, &szCell);
+ rc = clearCell(pPage, pCell, &info);
if( rc ) goto cleardatabasepage_out;
}
if( !pPage->leaf ){
@@ -66793,27 +67486,7 @@ static int btreeDropTable(Btree *p, Pgno iTable, int *piMoved){
assert( sqlite3BtreeHoldsMutex(p) );
assert( p->inTrans==TRANS_WRITE );
-
- /* It is illegal to drop a table if any cursors are open on the
- ** database. This is because in auto-vacuum mode the backend may
- ** need to move another root-page to fill a gap left by the deleted
- ** root page. If an open cursor was using this page a problem would
- ** occur.
- **
- ** This error is caught long before control reaches this point.
- */
- if( NEVER(pBt->pCursor) ){
- sqlite3ConnectionBlocked(p->db, pBt->pCursor->pBtree->db);
- return SQLITE_LOCKED_SHAREDCACHE;
- }
-
- /*
- ** It is illegal to drop the sqlite_master table on page 1. But again,
- ** this error is caught long before reaching this point.
- */
- if( NEVER(iTable<2) ){
- return SQLITE_CORRUPT_BKPT;
- }
+ assert( iTable>=2 );
rc = btreeGetPage(pBt, (Pgno)iTable, &pPage, 0);
if( rc ) return rc;
@@ -67721,7 +68394,7 @@ SQLITE_PRIVATE int sqlite3BtreeCheckpoint(Btree *p, int eMode, int *pnLog, int *
if( pBt->inTransaction!=TRANS_NONE ){
rc = SQLITE_LOCKED;
}else{
- rc = sqlite3PagerCheckpoint(pBt->pPager, eMode, pnLog, pnCkpt);
+ rc = sqlite3PagerCheckpoint(pBt->pPager, p->db, eMode, pnLog, pnCkpt);
}
sqlite3BtreeLeave(p);
}
@@ -69697,10 +70370,9 @@ SQLITE_PRIVATE int sqlite3VdbeMemSetStr(
/*
** Move data out of a btree key or data field and into a Mem structure.
-** The data or key is taken from the entry that pCur is currently pointing
+** The data is payload from the entry that pCur is currently pointing
** to. offset and amt determine what portion of the payload to retrieve.
-** key is true to get the key or false to get data. The result is written
-** into the pMem element.
+** The result is written into the pMem element.
**
** The pMem object must have been initialized. This routine will use
** pMem->zMalloc to hold the content from the btree, if possible. New
@@ -69715,17 +70387,12 @@ static SQLITE_NOINLINE int vdbeMemFromBtreeResize(
BtCursor *pCur, /* Cursor pointing at record to retrieve. */
u32 offset, /* Offset from the start of data to return bytes from. */
u32 amt, /* Number of bytes to return. */
- int key, /* If true, retrieve from the btree key, not data. */
Mem *pMem /* OUT: Return data in this Mem structure. */
){
int rc;
pMem->flags = MEM_Null;
if( SQLITE_OK==(rc = sqlite3VdbeMemClearAndResize(pMem, amt+2)) ){
- if( key ){
- rc = sqlite3BtreeKey(pCur, offset, amt, pMem->z);
- }else{
- rc = sqlite3BtreeData(pCur, offset, amt, pMem->z);
- }
+ rc = sqlite3BtreePayload(pCur, offset, amt, pMem->z);
if( rc==SQLITE_OK ){
pMem->z[amt] = 0;
pMem->z[amt+1] = 0;
@@ -69741,7 +70408,6 @@ SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(
BtCursor *pCur, /* Cursor pointing at record to retrieve. */
u32 offset, /* Offset from the start of data to return bytes from. */
u32 amt, /* Number of bytes to return. */
- int key, /* If true, retrieve from the btree key, not data. */
Mem *pMem /* OUT: Return data in this Mem structure. */
){
char *zData; /* Data from the btree layer */
@@ -69762,7 +70428,7 @@ SQLITE_PRIVATE int sqlite3VdbeMemFromBtree(
pMem->flags = MEM_Blob|MEM_Ephem;
pMem->n = (int)amt;
}else{
- rc = vdbeMemFromBtreeResize(pCur, offset, amt, key, pMem);
+ rc = vdbeMemFromBtreeResize(pCur, offset, amt, pMem);
}
return rc;
@@ -69780,6 +70446,7 @@ static SQLITE_NOINLINE const void *valueToText(sqlite3_value* pVal, u8 enc){
assert( (pVal->flags & MEM_RowSet)==0 );
assert( (pVal->flags & (MEM_Null))==0 );
if( pVal->flags & (MEM_Blob|MEM_Str) ){
+ if( ExpandBlob(pVal) ) return 0;
pVal->flags |= MEM_Str;
if( pVal->enc != (enc & ~SQLITE_UTF16_ALIGNED) ){
sqlite3VdbeChangeEncoding(pVal, enc & ~SQLITE_UTF16_ALIGNED);
@@ -70103,6 +70770,7 @@ static int valueFromExpr(
}else if( op==TK_NULL ){
pVal = valueNew(db, pCtx);
if( pVal==0 ) goto no_mem;
+ sqlite3VdbeMemNumerify(pVal);
}
#ifndef SQLITE_OMIT_BLOB_LITERAL
else if( op==TK_BLOB ){
@@ -70792,7 +71460,11 @@ SQLITE_PRIVATE int sqlite3VdbeAddOp4Int(
int p4 /* The P4 operand as an integer */
){
int addr = sqlite3VdbeAddOp3(p, op, p1, p2, p3);
- sqlite3VdbeChangeP4(p, addr, SQLITE_INT_TO_PTR(p4), P4_INT32);
+ if( p->db->mallocFailed==0 ){
+ VdbeOp *pOp = &p->aOp[addr];
+ pOp->p4type = P4_INT32;
+ pOp->p4.i = p4;
+ }
return addr;
}
@@ -71124,6 +71796,22 @@ SQLITE_PRIVATE void sqlite3VdbeVerifyNoMallocRequired(Vdbe *p, int N){
#endif
/*
+** Verify that the VM passed as the only argument does not contain
+** an OP_ResultRow opcode. Fail an assert() if it does. This is used
+** by code in pragma.c to ensure that the implementation of certain
+** pragmas comports with the flags specified in the mkpragmatab.tcl
+** script.
+*/
+#if defined(SQLITE_DEBUG) && !defined(SQLITE_TEST_REALLOC_STRESS)
+SQLITE_PRIVATE void sqlite3VdbeVerifyNoResultRow(Vdbe *p){
+ int i;
+ for(i=0; i<p->nOp; i++){
+ assert( p->aOp[i].opcode!=OP_ResultRow );
+ }
+}
+#endif
+
+/*
** This function returns a pointer to the array of opcodes associated with
** the Vdbe passed as the first argument. It is the callers responsibility
** to arrange for the returned array to be eventually freed using the
@@ -71242,7 +71930,7 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP2(Vdbe *p, u32 addr, int val){
SQLITE_PRIVATE void sqlite3VdbeChangeP3(Vdbe *p, u32 addr, int val){
sqlite3VdbeGetOp(p,addr)->p3 = val;
}
-SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u8 p5){
+SQLITE_PRIVATE void sqlite3VdbeChangeP5(Vdbe *p, u16 p5){
assert( p->nOp>0 || p->db->mallocFailed );
if( p->nOp>0 ) p->aOp[p->nOp-1].p5 = p5;
}
@@ -71303,10 +71991,6 @@ static void freeP4(sqlite3 *db, int p4type, void *p4){
break;
}
#endif
- case P4_MPRINTF: {
- if( db->pnBytesFreed==0 ) sqlite3_free(p4);
- break;
- }
case P4_FUNCDEF: {
freeEphemeralFunction(db, (FuncDef*)p4);
break;
@@ -71452,15 +72136,41 @@ SQLITE_PRIVATE void sqlite3VdbeChangeP4(Vdbe *p, int addr, const char *zP4, int
}
/*
+** Change the P4 operand of the most recently coded instruction
+** to the value defined by the arguments. This is a high-speed
+** version of sqlite3VdbeChangeP4().
+**
+** The P4 operand must not have been previously defined. And the new
+** P4 must not be P4_INT32. Use sqlite3VdbeChangeP4() in either of
+** those cases.
+*/
+SQLITE_PRIVATE void sqlite3VdbeAppendP4(Vdbe *p, void *pP4, int n){
+ VdbeOp *pOp;
+ assert( n!=P4_INT32 && n!=P4_VTAB );
+ assert( n<=0 );
+ if( p->db->mallocFailed ){
+ freeP4(p->db, n, pP4);
+ }else{
+ assert( pP4!=0 );
+ assert( p->nOp>0 );
+ pOp = &p->aOp[p->nOp-1];
+ assert( pOp->p4type==P4_NOTUSED );
+ pOp->p4type = n;
+ pOp->p4.p = pP4;
+ }
+}
+
+/*
** Set the P4 on the most recently added opcode to the KeyInfo for the
** index given.
*/
SQLITE_PRIVATE void sqlite3VdbeSetP4KeyInfo(Parse *pParse, Index *pIdx){
Vdbe *v = pParse->pVdbe;
+ KeyInfo *pKeyInfo;
assert( v!=0 );
assert( pIdx!=0 );
- sqlite3VdbeChangeP4(v, -1, (char*)sqlite3KeyInfoOfIndex(pParse, pIdx),
- P4_KEYINFO);
+ pKeyInfo = sqlite3KeyInfoOfIndex(pParse, pIdx);
+ if( pKeyInfo ) sqlite3VdbeAppendP4(v, pKeyInfo, P4_KEYINFO);
}
#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
@@ -71750,7 +72460,7 @@ static char *displayP4(Op *pOp, char *zTemp, int nTemp){
sqlite3XPrintf(&x, "%s(%d)", pDef->zName, pDef->nArg);
break;
}
-#ifdef SQLITE_DEBUG
+#if defined(SQLITE_DEBUG) || defined(VDBE_PROFILE)
case P4_FUNCCTX: {
FuncDef *pDef = pOp->p4.pCtx->pFunc;
sqlite3XPrintf(&x, "%s(%d)", pDef->zName, pDef->nArg);
@@ -72428,10 +73138,8 @@ SQLITE_PRIVATE void sqlite3VdbeMakeReady(
x.nFree = x.nNeeded;
}while( !db->mallocFailed );
- p->nzVar = pParse->nzVar;
- p->azVar = pParse->azVar;
- pParse->nzVar = 0;
- pParse->azVar = 0;
+ p->pVList = pParse->pVList;
+ pParse->pVList = 0;
p->explain = pParse->explain;
if( db->mallocFailed ){
p->nVar = 0;
@@ -72459,15 +73167,15 @@ SQLITE_PRIVATE void sqlite3VdbeFreeCursor(Vdbe *p, VdbeCursor *pCx){
if( pCx==0 ){
return;
}
- assert( pCx->pBt==0 || pCx->eCurType==CURTYPE_BTREE );
+ assert( pCx->pBtx==0 || pCx->eCurType==CURTYPE_BTREE );
switch( pCx->eCurType ){
case CURTYPE_SORTER: {
sqlite3VdbeSorterClose(p->db, pCx);
break;
}
case CURTYPE_BTREE: {
- if( pCx->pBt ){
- sqlite3BtreeClose(pCx->pBt);
+ if( pCx->pBtx ){
+ sqlite3BtreeClose(pCx->pBtx);
/* The pCx->pCursor will be closed automatically, if it exists, by
** the call above. */
}else{
@@ -72936,60 +73644,59 @@ static void checkActiveVdbeCnt(sqlite3 *db){
** If an IO error occurs, an SQLITE_IOERR_XXX error code is returned.
** Otherwise SQLITE_OK.
*/
-SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *p, int eOp){
+static SQLITE_NOINLINE int vdbeCloseStatement(Vdbe *p, int eOp){
sqlite3 *const db = p->db;
int rc = SQLITE_OK;
+ int i;
+ const int iSavepoint = p->iStatement-1;
- /* If p->iStatement is greater than zero, then this Vdbe opened a
- ** statement transaction that should be closed here. The only exception
- ** is that an IO error may have occurred, causing an emergency rollback.
- ** In this case (db->nStatement==0), and there is nothing to do.
- */
- if( db->nStatement && p->iStatement ){
- int i;
- const int iSavepoint = p->iStatement-1;
-
- assert( eOp==SAVEPOINT_ROLLBACK || eOp==SAVEPOINT_RELEASE);
- assert( db->nStatement>0 );
- assert( p->iStatement==(db->nStatement+db->nSavepoint) );
-
- for(i=0; i<db->nDb; i++){
- int rc2 = SQLITE_OK;
- Btree *pBt = db->aDb[i].pBt;
- if( pBt ){
- if( eOp==SAVEPOINT_ROLLBACK ){
- rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_ROLLBACK, iSavepoint);
- }
- if( rc2==SQLITE_OK ){
- rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_RELEASE, iSavepoint);
- }
- if( rc==SQLITE_OK ){
- rc = rc2;
- }
- }
- }
- db->nStatement--;
- p->iStatement = 0;
+ assert( eOp==SAVEPOINT_ROLLBACK || eOp==SAVEPOINT_RELEASE);
+ assert( db->nStatement>0 );
+ assert( p->iStatement==(db->nStatement+db->nSavepoint) );
- if( rc==SQLITE_OK ){
+ for(i=0; i<db->nDb; i++){
+ int rc2 = SQLITE_OK;
+ Btree *pBt = db->aDb[i].pBt;
+ if( pBt ){
if( eOp==SAVEPOINT_ROLLBACK ){
- rc = sqlite3VtabSavepoint(db, SAVEPOINT_ROLLBACK, iSavepoint);
+ rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_ROLLBACK, iSavepoint);
+ }
+ if( rc2==SQLITE_OK ){
+ rc2 = sqlite3BtreeSavepoint(pBt, SAVEPOINT_RELEASE, iSavepoint);
}
if( rc==SQLITE_OK ){
- rc = sqlite3VtabSavepoint(db, SAVEPOINT_RELEASE, iSavepoint);
+ rc = rc2;
}
}
+ }
+ db->nStatement--;
+ p->iStatement = 0;
- /* If the statement transaction is being rolled back, also restore the
- ** database handles deferred constraint counter to the value it had when
- ** the statement transaction was opened. */
+ if( rc==SQLITE_OK ){
if( eOp==SAVEPOINT_ROLLBACK ){
- db->nDeferredCons = p->nStmtDefCons;
- db->nDeferredImmCons = p->nStmtDefImmCons;
+ rc = sqlite3VtabSavepoint(db, SAVEPOINT_ROLLBACK, iSavepoint);
+ }
+ if( rc==SQLITE_OK ){
+ rc = sqlite3VtabSavepoint(db, SAVEPOINT_RELEASE, iSavepoint);
}
}
+
+ /* If the statement transaction is being rolled back, also restore the
+ ** database handles deferred constraint counter to the value it had when
+ ** the statement transaction was opened. */
+ if( eOp==SAVEPOINT_ROLLBACK ){
+ db->nDeferredCons = p->nStmtDefCons;
+ db->nDeferredImmCons = p->nStmtDefImmCons;
+ }
return rc;
}
+SQLITE_PRIVATE int sqlite3VdbeCloseStatement(Vdbe *p, int eOp){
+ if( p->db->nStatement && p->iStatement ){
+ return vdbeCloseStatement(p, eOp);
+ }
+ return SQLITE_OK;
+}
+
/*
** This function is called when a transaction opened by the database
@@ -73425,7 +74132,6 @@ SQLITE_PRIVATE void sqlite3VdbeDeleteAuxData(sqlite3 *db, AuxData **pp, int iOp,
*/
SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){
SubProgram *pSub, *pNext;
- int i;
assert( p->db==0 || p->db==db );
releaseMemArray(p->aColName, p->nResColumn*COLNAME_N);
for(pSub=p->pProgram; pSub; pSub=pNext){
@@ -73435,18 +74141,20 @@ SQLITE_PRIVATE void sqlite3VdbeClearObject(sqlite3 *db, Vdbe *p){
}
if( p->magic!=VDBE_MAGIC_INIT ){
releaseMemArray(p->aVar, p->nVar);
- for(i=p->nzVar-1; i>=0; i--) sqlite3DbFree(db, p->azVar[i]);
- sqlite3DbFree(db, p->azVar);
+ sqlite3DbFree(db, p->pVList);
sqlite3DbFree(db, p->pFree);
}
vdbeFreeOpArray(db, p->aOp, p->nOp);
sqlite3DbFree(db, p->aColName);
sqlite3DbFree(db, p->zSql);
#ifdef SQLITE_ENABLE_STMT_SCANSTATUS
- for(i=0; i<p->nScan; i++){
- sqlite3DbFree(db, p->aScan[i].zName);
+ {
+ int i;
+ for(i=0; i<p->nScan; i++){
+ sqlite3DbFree(db, p->aScan[i].zName);
+ }
+ sqlite3DbFree(db, p->aScan);
}
- sqlite3DbFree(db, p->aScan);
#endif
}
@@ -73947,30 +74655,13 @@ SQLITE_PRIVATE u32 sqlite3VdbeSerialGet(
** If an OOM error occurs, NULL is returned.
*/
SQLITE_PRIVATE UnpackedRecord *sqlite3VdbeAllocUnpackedRecord(
- KeyInfo *pKeyInfo, /* Description of the record */
- char *pSpace, /* Unaligned space available */
- int szSpace, /* Size of pSpace[] in bytes */
- char **ppFree /* OUT: Caller should free this pointer */
+ KeyInfo *pKeyInfo /* Description of the record */
){
UnpackedRecord *p; /* Unpacked record to return */
- int nOff; /* Increment pSpace by nOff to align it */
int nByte; /* Number of bytes required for *p */
-
- /* We want to shift the pointer pSpace up such that it is 8-byte aligned.
- ** Thus, we need to calculate a value, nOff, between 0 and 7, to shift
- ** it by. If pSpace is already 8-byte aligned, nOff should be zero.
- */
- nOff = (8 - (SQLITE_PTR_TO_INT(pSpace) & 7)) & 7;
nByte = ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*(pKeyInfo->nField+1);
- if( nByte>szSpace+nOff ){
- p = (UnpackedRecord *)sqlite3DbMallocRaw(pKeyInfo->db, nByte);
- *ppFree = (char *)p;
- if( !p ) return 0;
- }else{
- p = (UnpackedRecord*)&pSpace[nOff];
- *ppFree = 0;
- }
-
+ p = (UnpackedRecord *)sqlite3DbMallocRaw(pKeyInfo->db, nByte);
+ if( !p ) return 0;
p->aMem = (Mem*)&((char*)p)[ROUND8(sizeof(UnpackedRecord))];
assert( pKeyInfo->aSortOrder!=0 );
p->pKeyInfo = pKeyInfo;
@@ -74844,7 +75535,7 @@ SQLITE_PRIVATE int sqlite3VdbeIdxRowid(sqlite3 *db, BtCursor *pCur, i64 *rowid){
/* Read in the complete content of the index entry */
sqlite3VdbeMemInit(&m, db, 0);
- rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, 1, &m);
+ rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, &m);
if( rc ){
return rc;
}
@@ -74924,7 +75615,7 @@ SQLITE_PRIVATE int sqlite3VdbeIdxKeyCompare(
return SQLITE_CORRUPT_BKPT;
}
sqlite3VdbeMemInit(&m, db, 0);
- rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, 1, &m);
+ rc = sqlite3VdbeMemFromBtree(pCur, 0, (u32)nCellKey, &m);
if( rc ){
return rc;
}
@@ -75040,10 +75731,10 @@ SQLITE_PRIVATE void sqlite3VtabImportErrmsg(Vdbe *p, sqlite3_vtab *pVtab){
** This function is used to free UnpackedRecord structures allocated by
** the vdbeUnpackRecord() function found in vdbeapi.c.
*/
-static void vdbeFreeUnpacked(sqlite3 *db, UnpackedRecord *p){
+static void vdbeFreeUnpacked(sqlite3 *db, int nField, UnpackedRecord *p){
if( p ){
int i;
- for(i=0; i<p->nField; i++){
+ for(i=0; i<nField; i++){
Mem *pMem = &p->aMem[i];
if( pMem->zMalloc ) sqlite3VdbeMemRelease(pMem);
}
@@ -75076,10 +75767,15 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(
assert( db->pPreUpdate==0 );
memset(&preupdate, 0, sizeof(PreUpdate));
- if( op==SQLITE_UPDATE ){
- iKey2 = v->aMem[iReg].u.i;
+ if( HasRowid(pTab)==0 ){
+ iKey1 = iKey2 = 0;
+ preupdate.pPk = sqlite3PrimaryKeyIndex(pTab);
}else{
- iKey2 = iKey1;
+ if( op==SQLITE_UPDATE ){
+ iKey2 = v->aMem[iReg].u.i;
+ }else{
+ iKey2 = iKey1;
+ }
}
assert( pCsr->nField==pTab->nCol
@@ -75102,8 +75798,8 @@ SQLITE_PRIVATE void sqlite3VdbePreUpdateHook(
db->xPreUpdateCallback(db->pPreUpdateArg, db, op, zDb, zTbl, iKey1, iKey2);
db->pPreUpdate = 0;
sqlite3DbFree(db, preupdate.aRecord);
- vdbeFreeUnpacked(db, preupdate.pUnpacked);
- vdbeFreeUnpacked(db, preupdate.pNewUnpacked);
+ vdbeFreeUnpacked(db, preupdate.keyinfo.nField+1, preupdate.pUnpacked);
+ vdbeFreeUnpacked(db, preupdate.keyinfo.nField+1, preupdate.pNewUnpacked);
if( preupdate.aNew ){
int i;
for(i=0; i<pCsr->nField; i++){
@@ -76588,10 +77284,8 @@ SQLITE_API int sqlite3_bind_parameter_count(sqlite3_stmt *pStmt){
*/
SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt *pStmt, int i){
Vdbe *p = (Vdbe*)pStmt;
- if( p==0 || i<1 || i>p->nzVar ){
- return 0;
- }
- return p->azVar[i-1];
+ if( p==0 ) return 0;
+ return sqlite3VListNumToName(p->pVList, i);
}
/*
@@ -76600,19 +77294,8 @@ SQLITE_API const char *sqlite3_bind_parameter_name(sqlite3_stmt *pStmt, int i){
** return 0.
*/
SQLITE_PRIVATE int sqlite3VdbeParameterIndex(Vdbe *p, const char *zName, int nName){
- int i;
- if( p==0 ){
- return 0;
- }
- if( zName ){
- for(i=0; i<p->nzVar; i++){
- const char *z = p->azVar[i];
- if( z && strncmp(z,zName,nName)==0 && z[nName]==0 ){
- return i+1;
- }
- }
- }
- return 0;
+ if( p==0 || zName==0 ) return 0;
+ return sqlite3VListNameToNum(p->pVList, zName, nName);
}
SQLITE_API int sqlite3_bind_parameter_index(sqlite3_stmt *pStmt, const char *zName){
return sqlite3VdbeParameterIndex((Vdbe*)pStmt, zName, sqlite3Strlen30(zName));
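
For illustration, here is how the new VList lookups surface through the public API; a short sketch using standard SQLite calls (the statement text and function name are made up):

#include <sqlite3.h>
#include <stdio.h>

/* Print the name of every parameter in a statement, then look one up by name.
** The bare "?" is nameless, so sqlite3_bind_parameter_name() returns NULL
** for it and the code guards against that. */
static void showParams(sqlite3 *db){
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v2(db, "SELECT :alpha, ?, @beta", -1, &pStmt, 0)==SQLITE_OK ){
    int i, n = sqlite3_bind_parameter_count(pStmt);
    for(i=1; i<=n; i++){
      const char *zName = sqlite3_bind_parameter_name(pStmt, i);
      printf("param %d -> %s\n", i, zName ? zName : "(unnamed)");
    }
    printf(":alpha is parameter %d\n",
           sqlite3_bind_parameter_index(pStmt, ":alpha"));
    sqlite3_finalize(pStmt);
  }
}
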
@@ -76775,10 +77458,9 @@ static UnpackedRecord *vdbeUnpackRecord(
int nKey,
const void *pKey
){
- char *dummy; /* Dummy argument for AllocUnpackedRecord() */
UnpackedRecord *pRet; /* Return value */
- pRet = sqlite3VdbeAllocUnpackedRecord(pKeyInfo, 0, 0, &dummy);
+ pRet = sqlite3VdbeAllocUnpackedRecord(pKeyInfo);
if( pRet ){
memset(pRet->aMem, 0, sizeof(Mem)*(pKeyInfo->nField+1));
sqlite3VdbeRecordUnpack(pKeyInfo, nKey, pKey, pRet);
@@ -76792,6 +77474,7 @@ static UnpackedRecord *vdbeUnpackRecord(
*/
SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppValue){
PreUpdate *p = db->pPreUpdate;
+ Mem *pMem;
int rc = SQLITE_OK;
/* Test that this call is being made from within an SQLITE_DELETE or
@@ -76800,6 +77483,9 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa
rc = SQLITE_MISUSE_BKPT;
goto preupdate_old_out;
}
+ if( p->pPk ){
+ iIdx = sqlite3ColumnOfIndex(p->pPk, iIdx);
+ }
if( iIdx>=p->pCsr->nField || iIdx<0 ){
rc = SQLITE_RANGE;
goto preupdate_old_out;
@@ -76813,7 +77499,7 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa
nRec = sqlite3BtreePayloadSize(p->pCsr->uc.pCursor);
aRec = sqlite3DbMallocRaw(db, nRec);
if( !aRec ) goto preupdate_old_out;
- rc = sqlite3BtreeData(p->pCsr->uc.pCursor, 0, nRec, aRec);
+ rc = sqlite3BtreePayload(p->pCsr->uc.pCursor, 0, nRec, aRec);
if( rc==SQLITE_OK ){
p->pUnpacked = vdbeUnpackRecord(&p->keyinfo, nRec, aRec);
if( !p->pUnpacked ) rc = SQLITE_NOMEM;
@@ -76825,17 +77511,14 @@ SQLITE_API int sqlite3_preupdate_old(sqlite3 *db, int iIdx, sqlite3_value **ppVa
p->aRecord = aRec;
}
- if( iIdx>=p->pUnpacked->nField ){
+ pMem = *ppValue = &p->pUnpacked->aMem[iIdx];
+ if( iIdx==p->pTab->iPKey ){
+ sqlite3VdbeMemSetInt64(pMem, p->iKey1);
+ }else if( iIdx>=p->pUnpacked->nField ){
*ppValue = (sqlite3_value *)columnNullValue();
- }else{
- Mem *pMem = *ppValue = &p->pUnpacked->aMem[iIdx];
- *ppValue = &p->pUnpacked->aMem[iIdx];
- if( iIdx==p->pTab->iPKey ){
- sqlite3VdbeMemSetInt64(pMem, p->iKey1);
- }else if( p->pTab->aCol[iIdx].affinity==SQLITE_AFF_REAL ){
- if( pMem->flags & MEM_Int ){
- sqlite3VdbeMemRealify(pMem);
- }
+ }else if( p->pTab->aCol[iIdx].affinity==SQLITE_AFF_REAL ){
+ if( pMem->flags & MEM_Int ){
+ sqlite3VdbeMemRealify(pMem);
}
}
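
A rough sketch of the consumer side of this hook, assuming a build with SQLITE_ENABLE_PREUPDATE_HOOK; the callback name is hypothetical, while the sqlite3_preupdate_* calls are the public API these changes feed:

#include <sqlite3.h>
#include <stdio.h>

/* Pre-update callback that dumps the old value of every column before an
** UPDATE or DELETE is applied. */
static void examplePreUpdate(void *pCtx, sqlite3 *db, int op,
                             const char *zDb, const char *zTbl,
                             sqlite3_int64 iKey1, sqlite3_int64 iKey2){
  if( op==SQLITE_UPDATE || op==SQLITE_DELETE ){
    int i, n = sqlite3_preupdate_count(db);
    for(i=0; i<n; i++){
      sqlite3_value *pOld = 0;
      if( sqlite3_preupdate_old(db, i, &pOld)==SQLITE_OK ){
        const unsigned char *z = sqlite3_value_text(pOld);
        printf("%s.%s old[%d] = %s\n", zDb, zTbl, i, z ? (const char*)z : "NULL");
      }
    }
  }
}
/* Registered once per connection with:
**   sqlite3_preupdate_hook(db, examplePreUpdate, 0); */
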
@@ -76888,6 +77571,9 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa
rc = SQLITE_MISUSE_BKPT;
goto preupdate_new_out;
}
+ if( p->pPk && p->op!=SQLITE_UPDATE ){
+ iIdx = sqlite3ColumnOfIndex(p->pPk, iIdx);
+ }
if( iIdx>=p->pCsr->nField || iIdx<0 ){
rc = SQLITE_RANGE;
goto preupdate_new_out;
@@ -76908,13 +77594,11 @@ SQLITE_API int sqlite3_preupdate_new(sqlite3 *db, int iIdx, sqlite3_value **ppVa
}
p->pNewUnpacked = pUnpack;
}
- if( iIdx>=pUnpack->nField ){
+ pMem = &pUnpack->aMem[iIdx];
+ if( iIdx==p->pTab->iPKey ){
+ sqlite3VdbeMemSetInt64(pMem, p->iKey2);
+ }else if( iIdx>=pUnpack->nField ){
pMem = (sqlite3_value *)columnNullValue();
- }else{
- pMem = &pUnpack->aMem[iIdx];
- if( iIdx==p->pTab->iPKey ){
- sqlite3VdbeMemSetInt64(pMem, p->iKey2);
- }
}
}else{
/* For an UPDATE, memory cell (p->iNewReg+1+iIdx) contains the required
@@ -77330,7 +78014,7 @@ SQLITE_API int sqlite3_found_count = 0;
** Test a register to see if it exceeds the current maximum blob size.
** If it does, record the new maximum blob size.
*/
-#if defined(SQLITE_TEST) && !defined(SQLITE_OMIT_BUILTIN_TEST)
+#if defined(SQLITE_TEST) && !defined(SQLITE_UNTESTABLE)
# define UPDATE_MAX_BLOBSIZE(P) updateMaxBlobsize(P)
#else
# define UPDATE_MAX_BLOBSIZE(P)
@@ -77440,7 +78124,7 @@ static VdbeCursor *allocateCursor(
}
if( SQLITE_OK==sqlite3VdbeMemClearAndResize(pMem, nByte) ){
p->apCsr[iCur] = pCx = (VdbeCursor*)pMem->z;
- memset(pCx, 0, sizeof(VdbeCursor));
+ memset(pCx, 0, offsetof(VdbeCursor,pAltCursor));
pCx->eCurType = eCurType;
pCx->iDb = iDb;
pCx->nField = nField;
@@ -77891,8 +78575,6 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
Mem *pIn2 = 0; /* 2nd input operand */
Mem *pIn3 = 0; /* 3rd input operand */
Mem *pOut = 0; /* Output operand */
- int *aPermute = 0; /* Permutation of columns for OP_Compare */
- i64 lastRowid = db->lastRowid; /* Saved value of the last insert ROWID */
#ifdef VDBE_PROFILE
u64 start; /* CPU clock count at start of opcode */
#endif
@@ -77907,7 +78589,6 @@ SQLITE_PRIVATE int sqlite3VdbeExec(
}
assert( p->rc==SQLITE_OK || (p->rc&0xff)==SQLITE_BUSY );
assert( p->bIsReader || p->readOnly!=0 );
- p->rc = SQLITE_OK;
p->iCurrentTime = 0;
assert( p->explain==0 );
p->pResultSet = 0;
@@ -78268,7 +78949,6 @@ case OP_Halt: {
p->nFrame--;
sqlite3VdbeSetChanges(db, p->nChange);
pcx = sqlite3VdbeFrameRestore(pFrame);
- lastRowid = db->lastRowid;
if( pOp->p2==OE_Ignore ){
/* Instruction pcx is the OP_Program that invoked the sub-program
** currently being halted. If the p2 instruction of this OP_Halt
@@ -78285,7 +78965,7 @@ case OP_Halt: {
p->rc = pOp->p1;
p->errorAction = (u8)pOp->p2;
p->pc = pcx;
- assert( pOp->p5>=0 && pOp->p5<=4 );
+ assert( pOp->p5<=4 );
if( p->rc ){
if( pOp->p5 ){
static const char * const azType[] = { "NOT NULL", "UNIQUE", "CHECK",
@@ -78498,12 +79178,12 @@ case OP_Variable: { /* out2 */
Mem *pVar; /* Value being transferred */
assert( pOp->p1>0 && pOp->p1<=p->nVar );
- assert( pOp->p4.z==0 || pOp->p4.z==p->azVar[pOp->p1-1] );
+ assert( pOp->p4.z==0 || pOp->p4.z==sqlite3VListNumToName(p->pVList,pOp->p1) );
pVar = &p->aVar[pOp->p1 - 1];
if( sqlite3VdbeMemTooBig(pVar) ){
goto too_big;
}
- pOut = out2Prerelease(p, pOp);
+ pOut = &aMem[pOp->p2];
sqlite3VdbeMemShallowCopy(pOut, pVar, MEM_Static);
UPDATE_MAX_BLOBSIZE(pOut);
break;
@@ -78990,9 +79670,7 @@ case OP_Function: {
#endif
MemSetTypeFlag(pCtx->pOut, MEM_Null);
pCtx->fErrorOrAux = 0;
- db->lastRowid = lastRowid;
(*pCtx->pFunc->xSFunc)(pCtx, pCtx->argc, pCtx->argv);/* IMP: R-24505-23230 */
- lastRowid = db->lastRowid; /* Remember rowid changes made by xSFunc */
/* If the function returned an error, throw an exception */
if( pCtx->fErrorOrAux ){
@@ -79311,8 +79989,7 @@ case OP_Ge: { /* same as TK_GE, jump, in1, in3 */
assert( pOp->opcode==OP_Eq || pOp->opcode==OP_Ne );
assert( (flags1 & MEM_Cleared)==0 );
assert( (pOp->p5 & SQLITE_JUMPIFNULL)==0 );
- if( (flags1&MEM_Null)!=0
- && (flags3&MEM_Null)!=0
+ if( (flags1&flags3&MEM_Null)!=0
&& (flags3&MEM_Cleared)==0
){
res = 0; /* Operands are equal */
@@ -79449,8 +80126,8 @@ case OP_ElseNotEq: { /* same as TK_ESCAPE, jump */
/* Opcode: Permutation * * * P4 *
**
-** Set the permutation used by the OP_Compare operator to be the array
-** of integers in P4.
+** Set the permutation used by the OP_Compare operator in the next
+** instruction. The permutation is stored in the P4 operand.
**
** The permutation is only valid until the next OP_Compare that has
** the OPFLAG_PERMUTE bit set in P5. Typically the OP_Permutation should
@@ -79462,7 +80139,8 @@ case OP_ElseNotEq: { /* same as TK_ESCAPE, jump */
case OP_Permutation: {
assert( pOp->p4type==P4_INTARRAY );
assert( pOp->p4.ai );
- aPermute = pOp->p4.ai + 1;
+ assert( pOp[1].opcode==OP_Compare );
+ assert( pOp[1].p5 & OPFLAG_PERMUTE );
break;
}
@@ -79495,8 +80173,17 @@ case OP_Compare: {
int idx;
CollSeq *pColl; /* Collating sequence to use on this term */
int bRev; /* True for DESCENDING sort order */
+ int *aPermute; /* The permutation */
- if( (pOp->p5 & OPFLAG_PERMUTE)==0 ) aPermute = 0;
+ if( (pOp->p5 & OPFLAG_PERMUTE)==0 ){
+ aPermute = 0;
+ }else{
+ assert( pOp>aOp );
+ assert( pOp[-1].opcode==OP_Permutation );
+ assert( pOp[-1].p4type==P4_INTARRAY );
+ aPermute = pOp[-1].p4.ai + 1;
+ assert( aPermute!=0 );
+ }
n = pOp->p3;
pKeyInfo = pOp->p4.pKeyInfo;
assert( n>0 );
@@ -79529,7 +80216,6 @@ case OP_Compare: {
break;
}
}
- aPermute = 0;
break;
}
@@ -79779,7 +80465,6 @@ case OP_Column: {
assert( pC->eCurType!=CURTYPE_VTAB );
assert( pC->eCurType!=CURTYPE_PSEUDO || pC->nullRow );
assert( pC->eCurType!=CURTYPE_SORTER );
- pCrsr = pC->uc.pCursor;
if( pC->cacheStatus!=p->cacheCtr ){ /*OPTIMIZATION-IF-FALSE*/
if( pC->nullRow ){
@@ -79795,6 +80480,7 @@ case OP_Column: {
goto op_column_out;
}
}else{
+ pCrsr = pC->uc.pCursor;
assert( pC->eCurType==CURTYPE_BTREE );
assert( pCrsr );
assert( sqlite3BtreeCursorIsValid(pCrsr) );
@@ -79858,7 +80544,7 @@ case OP_Column: {
/* Make sure zData points to enough of the record to cover the header. */
if( pC->aRow==0 ){
memset(&sMem, 0, sizeof(sMem));
- rc = sqlite3VdbeMemFromBtree(pCrsr, 0, aOffset[0], !pC->isTable, &sMem);
+ rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, 0, aOffset[0], &sMem);
if( rc!=SQLITE_OK ) goto abort_due_to_error;
zData = (u8*)sMem.z;
}else{
@@ -79971,8 +80657,7 @@ case OP_Column: {
static u8 aZero[8]; /* This is the bogus content */
sqlite3VdbeSerialGet(aZero, t, pDest);
}else{
- rc = sqlite3VdbeMemFromBtree(pCrsr, aOffset[p2], len, !pC->isTable,
- pDest);
+ rc = sqlite3VdbeMemFromBtree(pC->uc.pCursor, aOffset[p2], len, pDest);
if( rc!=SQLITE_OK ) goto abort_due_to_error;
sqlite3VdbeSerialGet((const u8*)pDest->z, t, pDest);
pDest->flags &= ~MEM_Ephem;
@@ -80087,6 +80772,20 @@ case OP_MakeRecord: {
}while( zAffinity[0] );
}
+#ifdef SQLITE_ENABLE_NULL_TRIM
+ /* NULLs can be safely trimmed from the end of the record, as long as
+ ** the schema format is 2 or more and none of the omitted columns
+ ** have a non-NULL default value. Also, the record must be left with
+ ** at least one field. If P5>0 then it will be one more than the
+ ** index of the right-most column with a non-NULL default value */
+ if( pOp->p5 ){
+ while( (pLast->flags & MEM_Null)!=0 && nField>pOp->p5 ){
+ pLast--;
+ nField--;
+ }
+ }
+#endif
+
/* Loop through the elements that will make up the record to figure
** out how much space is required for the new record.
*/
@@ -80837,10 +81536,10 @@ case OP_OpenEphemeral: {
if( pCx==0 ) goto no_mem;
pCx->nullRow = 1;
pCx->isEphemeral = 1;
- rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pCx->pBt,
+ rc = sqlite3BtreeOpen(db->pVfs, 0, db, &pCx->pBtx,
BTREE_OMIT_JOURNAL | BTREE_SINGLE | pOp->p5, vfsFlags);
if( rc==SQLITE_OK ){
- rc = sqlite3BtreeBeginTrans(pCx->pBt, 1);
+ rc = sqlite3BtreeBeginTrans(pCx->pBtx, 1);
}
if( rc==SQLITE_OK ){
/* If a transient index is required, create it by calling
@@ -80848,21 +81547,20 @@ case OP_OpenEphemeral: {
** opening it. If a transient table is required, just use the
** automatically created table with root-page 1 (an BLOB_INTKEY table).
*/
- if( (pKeyInfo = pOp->p4.pKeyInfo)!=0 ){
+ if( (pCx->pKeyInfo = pKeyInfo = pOp->p4.pKeyInfo)!=0 ){
int pgno;
assert( pOp->p4type==P4_KEYINFO );
- rc = sqlite3BtreeCreateTable(pCx->pBt, &pgno, BTREE_BLOBKEY | pOp->p5);
+ rc = sqlite3BtreeCreateTable(pCx->pBtx, &pgno, BTREE_BLOBKEY | pOp->p5);
if( rc==SQLITE_OK ){
assert( pgno==MASTER_ROOT+1 );
assert( pKeyInfo->db==db );
assert( pKeyInfo->enc==ENC(db) );
- pCx->pKeyInfo = pKeyInfo;
- rc = sqlite3BtreeCursor(pCx->pBt, pgno, BTREE_WRCSR,
+ rc = sqlite3BtreeCursor(pCx->pBtx, pgno, BTREE_WRCSR,
pKeyInfo, pCx->uc.pCursor);
}
pCx->isTable = 0;
}else{
- rc = sqlite3BtreeCursor(pCx->pBt, MASTER_ROOT, BTREE_WRCSR,
+ rc = sqlite3BtreeCursor(pCx->pBtx, MASTER_ROOT, BTREE_WRCSR,
0, pCx->uc.pCursor);
pCx->isTable = 1;
}
@@ -81094,7 +81792,8 @@ case OP_SeekGT: { /* jump, in3 */
if( pC->isTable ){
/* The BTREE_SEEK_EQ flag is only set on index cursors */
- assert( sqlite3BtreeCursorHasHint(pC->uc.pCursor, BTREE_SEEK_EQ)==0 );
+ assert( sqlite3BtreeCursorHasHint(pC->uc.pCursor, BTREE_SEEK_EQ)==0
+ || CORRUPT_DB );
/* The input value in P3 might be of any type: integer, real, string,
** blob, or NULL. But it needs to be an integer before we can do
@@ -81296,10 +81995,9 @@ case OP_Found: { /* jump, in3 */
int ii;
VdbeCursor *pC;
int res;
- char *pFree;
+ UnpackedRecord *pFree;
UnpackedRecord *pIdxKey;
UnpackedRecord r;
- char aTempRec[ROUND8(sizeof(UnpackedRecord)) + sizeof(Mem)*4 + 7];
#ifdef SQLITE_TEST
if( pOp->opcode!=OP_NoConflict ) sqlite3_found_count++;
@@ -81316,7 +82014,6 @@ case OP_Found: { /* jump, in3 */
assert( pC->eCurType==CURTYPE_BTREE );
assert( pC->uc.pCursor!=0 );
assert( pC->isTable==0 );
- pFree = 0;
if( pOp->p4.i>0 ){
r.pKeyInfo = pC->pKeyInfo;
r.nField = (u16)pOp->p4.i;
@@ -81329,10 +82026,9 @@ case OP_Found: { /* jump, in3 */
}
#endif
pIdxKey = &r;
+ pFree = 0;
}else{
- pIdxKey = sqlite3VdbeAllocUnpackedRecord(
- pC->pKeyInfo, aTempRec, sizeof(aTempRec), &pFree
- );
+ pFree = pIdxKey = sqlite3VdbeAllocUnpackedRecord(pC->pKeyInfo);
if( pIdxKey==0 ) goto no_mem;
assert( pIn3->flags & MEM_Blob );
(void)ExpandBlob(pIn3);
@@ -81352,7 +82048,7 @@ case OP_Found: { /* jump, in3 */
}
}
rc = sqlite3BtreeMovetoUnpacked(pC->uc.pCursor, pIdxKey, 0, 0, &res);
- sqlite3DbFree(db, pFree);
+ if( pFree ) sqlite3DbFree(db, pFree);
if( rc!=SQLITE_OK ){
goto abort_due_to_error;
}
@@ -81579,7 +82275,7 @@ case OP_NewRowid: { /* out2 */
sqlite3VdbeMemIntegerify(pMem);
assert( (pMem->flags & MEM_Int)!=0 ); /* mem(P3) holds an integer */
if( pMem->u.i==MAX_ROWID || pC->useRandomRowid ){
- rc = SQLITE_FULL; /* IMP: R-12275-61338 */
+ rc = SQLITE_FULL; /* IMP: R-17817-00630 */
goto abort_due_to_error;
}
if( v<pMem->u.i+1 ){
@@ -81631,15 +82327,10 @@ case OP_NewRowid: { /* out2 */
** then rowid is stored for subsequent return by the
** sqlite3_last_insert_rowid() function (otherwise it is unmodified).
**
-** If the OPFLAG_USESEEKRESULT flag of P5 is set and if the result of
-** the last seek operation (OP_NotExists or OP_SeekRowid) was a success,
-** then this
-** operation will not attempt to find the appropriate row before doing
-** the insert but will instead overwrite the row that the cursor is
-** currently pointing to. Presumably, the prior OP_NotExists or
-** OP_SeekRowid opcode
-** has already positioned the cursor correctly. This is an optimization
-** that boosts performance by avoiding redundant seeks.
+** If the OPFLAG_USESEEKRESULT flag of P5 is set, the implementation might
+** run faster by avoiding an unnecessary seek on cursor P1. However,
+** the OPFLAG_USESEEKRESULT flag must only be set if there have been no prior
+** seeks on the cursor or if the most recent seek used a key equal to P3.
**
** If the OPFLAG_ISUPDATE flag is set, then this opcode is part of an
** UPDATE operation. Otherwise (if the flag is clear) then this opcode
@@ -81684,7 +82375,7 @@ case OP_InsertInt: {
assert( pC!=0 );
assert( pC->eCurType==CURTYPE_BTREE );
assert( pC->uc.pCursor!=0 );
- assert( pC->isTable );
+ assert( (pOp->p5 & OPFLAG_ISNOOP) || pC->isTable );
assert( pOp->p4type==P4_TABLE || pOp->p4type>=P4_STATIC );
REGISTER_TRACE(pOp->p2, pData);
@@ -81700,14 +82391,13 @@ case OP_InsertInt: {
}
if( pOp->p4type==P4_TABLE && HAS_UPDATE_HOOK(db) ){
- assert( pC->isTable );
assert( pC->iDb>=0 );
zDb = db->aDb[pC->iDb].zDbSName;
pTab = pOp->p4.pTab;
- assert( HasRowid(pTab) );
+ assert( (pOp->p5 & OPFLAG_ISNOOP) || HasRowid(pTab) );
op = ((pOp->p5 & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_INSERT);
}else{
- pTab = 0; /* Not needed. Silence a comiler warning. */
+ pTab = 0; /* Not needed. Silence a compiler warning. */
zDb = 0; /* Not needed. Silence a compiler warning. */
}
@@ -81719,10 +82409,11 @@ case OP_InsertInt: {
){
sqlite3VdbePreUpdateHook(p, pC, SQLITE_INSERT, zDb, pTab, x.nKey, pOp->p2);
}
+ if( pOp->p5 & OPFLAG_ISNOOP ) break;
#endif
if( pOp->p5 & OPFLAG_NCHANGE ) p->nChange++;
- if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = lastRowid = x.nKey;
+ if( pOp->p5 & OPFLAG_LASTROWID ) db->lastRowid = x.nKey;
if( pData->flags & MEM_Null ){
x.pData = 0;
x.nData = 0;
@@ -81739,7 +82430,7 @@ case OP_InsertInt: {
}
x.pKey = 0;
rc = sqlite3BtreeInsert(pC->uc.pCursor, &x,
- (pOp->p5 & OPFLAG_APPEND)!=0, seekResult
+ (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION)), seekResult
);
pC->deferredMoveto = 0;
pC->cacheStatus = CACHE_STALE;
@@ -81831,8 +82522,11 @@ case OP_Delete: {
#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
/* Invoke the pre-update-hook if required. */
- if( db->xPreUpdateCallback && pOp->p4.pTab && HasRowid(pTab) ){
- assert( !(opflags & OPFLAG_ISUPDATE) || (aMem[pOp->p3].flags & MEM_Int) );
+ if( db->xPreUpdateCallback && pOp->p4.pTab ){
+ assert( !(opflags & OPFLAG_ISUPDATE)
+ || HasRowid(pTab)==0
+ || (aMem[pOp->p3].flags & MEM_Int)
+ );
sqlite3VdbePreUpdateHook(p, pC,
(opflags & OPFLAG_ISUPDATE) ? SQLITE_UPDATE : SQLITE_DELETE,
zDb, pTab, pC->movetoTarget,
@@ -81863,6 +82557,7 @@ case OP_Delete: {
rc = sqlite3BtreeDelete(pC->uc.pCursor, pOp->p5);
pC->cacheStatus = CACHE_STALE;
+ pC->seekResult = 0;
if( rc ) goto abort_due_to_error;
/* Invoke the update-hook if required. */
@@ -81949,50 +82644,51 @@ case OP_SorterData: {
break;
}
-/* Opcode: RowData P1 P2 * * *
+/* Opcode: RowData P1 P2 P3 * *
** Synopsis: r[P2]=data
**
-** Write into register P2 the complete row data for cursor P1.
+** Write into register P2 the complete row content for the row at
+** which cursor P1 is currently pointing.
** There is no interpretation of the data.
** It is just copied onto the P2 register exactly as
** it is found in the database file.
**
+** If cursor P1 is an index, then the content is the key of the row.
+** If cursor P1 is a table, then the content extracted is the data.
+**
** The P1 cursor must be pointing to a valid row (not a NULL row)
** of a real table, not a pseudo-table.
-*/
-/* Opcode: RowKey P1 P2 * * *
-** Synopsis: r[P2]=key
**
-** Write into register P2 the complete row key for cursor P1.
-** There is no interpretation of the data.
-** The key is copied onto the P2 register exactly as
-** it is found in the database file.
+** If P3!=0 then this opcode is allowed to make an ephemeral pointer
+** into the database page. That means that the content of the output
+** register will be invalidated as soon as the cursor moves - including
+** moves caused by other cursors that "save" the current cursor's
+** position in order that they can write to the same table. If P3==0
+** then a copy of the data is made into memory. P3!=0 is faster, but
+** P3==0 is safer.
**
-** If the P1 cursor must be pointing to a valid row (not a NULL row)
-** of a real table, not a pseudo-table.
+** If P3!=0 then the content of the P2 register is unsuitable for use
+** in OP_Result and any OP_Result will invalidate the P2 register content.
+** The P2 register content is invalidated by opcodes like OP_Function or
+** by any use of another cursor pointing to the same table.
*/
-case OP_RowKey:
case OP_RowData: {
VdbeCursor *pC;
BtCursor *pCrsr;
u32 n;
- pOut = &aMem[pOp->p2];
- memAboutToChange(p, pOut);
+ pOut = out2Prerelease(p, pOp);
- /* Note that RowKey and RowData are really exactly the same instruction */
assert( pOp->p1>=0 && pOp->p1<p->nCursor );
pC = p->apCsr[pOp->p1];
assert( pC!=0 );
assert( pC->eCurType==CURTYPE_BTREE );
assert( isSorter(pC)==0 );
- assert( pC->isTable || pOp->opcode!=OP_RowData );
- assert( pC->isTable==0 || pOp->opcode==OP_RowData );
assert( pC->nullRow==0 );
assert( pC->uc.pCursor!=0 );
pCrsr = pC->uc.pCursor;
- /* The OP_RowKey and OP_RowData opcodes always follow OP_NotExists or
+ /* The OP_RowData opcode always follows OP_NotExists or
** OP_SeekRowid or OP_Rewind/Op_Next with no intervening instructions
** that might invalidate the cursor.
** If this were not the case, one of the following assert()s
@@ -82012,18 +82708,9 @@ case OP_RowData: {
goto too_big;
}
testcase( n==0 );
- if( sqlite3VdbeMemClearAndResize(pOut, MAX(n,32)) ){
- goto no_mem;
- }
- pOut->n = n;
- MemSetTypeFlag(pOut, MEM_Blob);
- if( pC->isTable==0 ){
- rc = sqlite3BtreeKey(pCrsr, 0, n, pOut->z);
- }else{
- rc = sqlite3BtreeData(pCrsr, 0, n, pOut->z);
- }
+ rc = sqlite3VdbeMemFromBtree(pCrsr, 0, n, pOut);
if( rc ) goto abort_due_to_error;
- pOut->enc = SQLITE_UTF8; /* In case the blob is ever cast to text */
+ if( !pOp->p3 ) Deephemeralize(pOut);
UPDATE_MAX_BLOBSIZE(pOut);
REGISTER_TRACE(pOp->p2, pOut);
break;
@@ -82112,6 +82799,13 @@ case OP_NullRow: {
** This opcode leaves the cursor configured to move in reverse order,
** from the end toward the beginning. In other words, the cursor is
** configured to use Prev, not Next.
+**
+** If P3 is -1, then the cursor is positioned at the end of the btree
+** for the purpose of appending a new entry onto the btree. In that
+** case P2 must be 0. It is assumed that the cursor is used only for
+** appending and so if the cursor is valid, then the cursor must already
+** be pointing at the end of the btree and so no changes are made to
+** the cursor.
*/
case OP_Last: { /* jump */
VdbeCursor *pC;
@@ -82125,23 +82819,36 @@ case OP_Last: { /* jump */
pCrsr = pC->uc.pCursor;
res = 0;
assert( pCrsr!=0 );
- rc = sqlite3BtreeLast(pCrsr, &res);
- pC->nullRow = (u8)res;
- pC->deferredMoveto = 0;
- pC->cacheStatus = CACHE_STALE;
pC->seekResult = pOp->p3;
#ifdef SQLITE_DEBUG
pC->seekOp = OP_Last;
#endif
- if( rc ) goto abort_due_to_error;
- if( pOp->p2>0 ){
- VdbeBranchTaken(res!=0,2);
- if( res ) goto jump_to_p2;
+ if( pOp->p3==0 || !sqlite3BtreeCursorIsValidNN(pCrsr) ){
+ rc = sqlite3BtreeLast(pCrsr, &res);
+ pC->nullRow = (u8)res;
+ pC->deferredMoveto = 0;
+ pC->cacheStatus = CACHE_STALE;
+ if( rc ) goto abort_due_to_error;
+ if( pOp->p2>0 ){
+ VdbeBranchTaken(res!=0,2);
+ if( res ) goto jump_to_p2;
+ }
+ }else{
+ assert( pOp->p2==0 );
}
break;
}
+/* Opcode: SorterSort P1 P2 * * *
+**
+** After all records have been inserted into the Sorter object
+** identified by P1, invoke this opcode to actually do the sorting.
+** Jump to P2 if there are no records to be sorted.
+**
+** This opcode is an alias for OP_Sort and OP_Rewind that is used
+** for Sorter objects.
+*/
/* Opcode: Sort P1 P2 * * *
**
** This opcode does exactly the same thing as OP_Rewind except that
@@ -82269,6 +82976,13 @@ case OP_Rewind: { /* jump */
** This opcode works just like Prev except that if cursor P1 is not
** open it behaves a no-op.
*/
+/* Opcode: SorterNext P1 P2 * * P5
+**
+** This opcode works just like OP_Next except that P1 must be a
+** sorter object for which the OP_SorterSort opcode has been
+** invoked. This opcode advances the cursor to the next sorted
+** record, or jumps to P2 if there are no more sorted records.
+*/
case OP_SorterNext: { /* jump */
VdbeCursor *pC;
int res;
@@ -82325,27 +83039,41 @@ next_tail:
goto check_for_interrupt;
}
-/* Opcode: IdxInsert P1 P2 P3 * P5
+/* Opcode: IdxInsert P1 P2 P3 P4 P5
** Synopsis: key=r[P2]
**
** Register P2 holds an SQL index key made using the
** MakeRecord instructions. This opcode writes that key
** into the index P1. Data for the entry is nil.
**
-** P3 is a flag that provides a hint to the b-tree layer that this
-** insert is likely to be an append.
+** If P4 is not zero, then it is the number of values in the unpacked
+** key of reg(P2). In that case, P3 is the index of the first register
+** for the unpacked key. When available, the unpacked key can be used in
+** place of the serialized key in reg(P2), which avoids having to decode it.
+**
+** If P5 has the OPFLAG_APPEND bit set, that is a hint to the b-tree layer
+** that this insert is likely to be an append.
**
** If P5 has the OPFLAG_NCHANGE bit set, then the change counter is
** incremented by this instruction. If the OPFLAG_NCHANGE bit is clear,
** then the change counter is unchanged.
**
-** If P5 has the OPFLAG_USESEEKRESULT bit set, then the cursor must have
-** just done a seek to the spot where the new entry is to be inserted.
-** This flag avoids doing an extra seek.
+** If the OPFLAG_USESEEKRESULT flag of P5 is set, the implementation might
+** run faster by avoiding an unnecessary seek on cursor P1. However,
+** the OPFLAG_USESEEKRESULT flag must only be set if there have been no prior
+** seeks on the cursor or if the most recent seek used a key equivalent
+** to P2.
**
** This instruction only works for indices. The equivalent instruction
** for tables is OP_Insert.
*/
+/* Opcode: SorterInsert P1 P2 * * *
+** Synopsis: key=r[P2]
+**
+** Register P2 holds an SQL index key made using the
+** MakeRecord instructions. This opcode writes that key
+** into the sorter P1. Data for the entry is nil.
+*/
case OP_SorterInsert: /* in2 */
case OP_IdxInsert: { /* in2 */
VdbeCursor *pC;
@@ -82367,7 +83095,10 @@ case OP_IdxInsert: { /* in2 */
}else{
x.nKey = pIn2->n;
x.pKey = pIn2->z;
- rc = sqlite3BtreeInsert(pC->uc.pCursor, &x, pOp->p3,
+ x.aMem = aMem + pOp->p3;
+ x.nMem = (u16)pOp->p4.i;
+ rc = sqlite3BtreeInsert(pC->uc.pCursor, &x,
+ (pOp->p5 & (OPFLAG_APPEND|OPFLAG_SAVEPOSITION)),
((pOp->p5 & OPFLAG_USESEEKRESULT) ? pC->seekResult : 0)
);
assert( pC->deferredMoveto==0 );
@@ -82411,6 +83142,7 @@ case OP_IdxDelete: {
}
assert( pC->deferredMoveto==0 );
pC->cacheStatus = CACHE_STALE;
+ pC->seekResult = 0;
break;
}
@@ -82488,7 +83220,6 @@ case OP_IdxRowid: { /* out2 */
}else{
pOut = out2Prerelease(p, pOp);
pOut->u.i = rowid;
- pOut->flags = MEM_Int;
}
}else{
assert( pOp->opcode==OP_IdxRowid );
@@ -82780,7 +83511,7 @@ case OP_ParseSchema: {
assert( iDb>=0 && iDb<db->nDb );
assert( DbHasProperty(db, iDb, DB_SchemaLoaded) );
/* Used to be a conditional */ {
- zMaster = SCHEMA_TABLE(iDb);
+ zMaster = MASTER_NAME;
initData.db = db;
initData.iDb = pOp->p1;
initData.pzErrMsg = &p->zErrMsg;
@@ -83130,7 +83861,7 @@ case OP_Program: { /* jump */
p->nFrame++;
pFrame->pParent = p->pFrame;
- pFrame->lastRowid = lastRowid;
+ pFrame->lastRowid = db->lastRowid;
pFrame->nChange = p->nChange;
pFrame->nDbChange = p->db->nChange;
assert( pFrame->pAuxData==0 );
@@ -83291,29 +84022,42 @@ case OP_IfPos: { /* jump, in1 */
** Otherwise, r[P2] is set to the sum of r[P1] and r[P3].
*/
case OP_OffsetLimit: { /* in1, out2, in3 */
+ i64 x;
pIn1 = &aMem[pOp->p1];
pIn3 = &aMem[pOp->p3];
pOut = out2Prerelease(p, pOp);
assert( pIn1->flags & MEM_Int );
assert( pIn3->flags & MEM_Int );
- pOut->u.i = pIn1->u.i<=0 ? -1 : pIn1->u.i+(pIn3->u.i>0?pIn3->u.i:0);
+ x = pIn1->u.i;
+ if( x<=0 || sqlite3AddInt64(&x, pIn3->u.i>0?pIn3->u.i:0) ){
+ /* If the LIMIT is less than or equal to zero, loop forever. This
+ ** is documented. But also, if the LIMIT+OFFSET exceeds 2^63 then
+ ** also loop forever. This is undocumented. In fact, one could argue
+ ** that the loop should terminate. But assuming 1 billion iterations
+ ** per second (far exceeding the capabilities of any current hardware)
+ ** it would take nearly 300 years to actually reach the limit. So
+ ** looping forever is a reasonable approximation. */
+ pOut->u.i = -1;
+ }else{
+ pOut->u.i = x;
+ }
break;
}
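
The saturating LIMIT+OFFSET computation above can be restated as a standalone helper; this sketch uses an explicit overflow test in place of the internal sqlite3AddInt64() used by the opcode:

#include <stdint.h>

/* Combine a LIMIT counter and an OFFSET the way OP_OffsetLimit does: a
** non-positive limit, or a sum that would overflow a signed 64-bit integer,
** yields -1, which the VM treats as "loop forever". */
static int64_t exampleOffsetLimit(int64_t iLimit, int64_t iOffset){
  if( iOffset<0 ) iOffset = 0;                  /* negative offsets count as 0 */
  if( iLimit<=0 ) return -1;                    /* LIMIT<=0 means unlimited */
  if( iLimit > INT64_MAX - iOffset ) return -1; /* LIMIT+OFFSET would overflow */
  return iLimit + iOffset;
}
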
-/* Opcode: IfNotZero P1 P2 P3 * *
-** Synopsis: if r[P1]!=0 then r[P1]-=P3, goto P2
+/* Opcode: IfNotZero P1 P2 * * *
+** Synopsis: if r[P1]!=0 then r[P1]--, goto P2
**
** Register P1 must contain an integer. If the content of register P1 is
-** initially nonzero, then subtract P3 from the value in register P1 and
-** jump to P2. If register P1 is initially zero, leave it unchanged
-** and fall through.
+** initially greater than zero, then decrement the value in register P1.
+** If the initial value is non-zero (negative or positive), also jump to P2.
+** If register P1 is initially zero, leave it unchanged and fall through.
*/
case OP_IfNotZero: { /* jump, in1 */
pIn1 = &aMem[pOp->p1];
assert( pIn1->flags&MEM_Int );
VdbeBranchTaken(pIn1->u.i<0, 2);
if( pIn1->u.i ){
- pIn1->u.i -= pOp->p3;
+ if( pIn1->u.i>0 ) pIn1->u.i--;
goto jump_to_p2;
}
break;
@@ -83322,13 +84066,13 @@ case OP_IfNotZero: { /* jump, in1 */
/* Opcode: DecrJumpZero P1 P2 * * *
** Synopsis: if (--r[P1])==0 goto P2
**
-** Register P1 must hold an integer. Decrement the value in register P1
-** then jump to P2 if the new value is exactly zero.
+** Register P1 must hold an integer. Decrement the value in P1
+** and jump to P2 if the new value is exactly zero.
*/
case OP_DecrJumpZero: { /* jump, in1 */
pIn1 = &aMem[pOp->p1];
assert( pIn1->flags&MEM_Int );
- pIn1->u.i--;
+ if( pIn1->u.i>SMALLEST_INT64 ) pIn1->u.i--;
VdbeBranchTaken(pIn1->u.i==0, 2);
if( pIn1->u.i==0 ) goto jump_to_p2;
break;
@@ -83574,7 +84318,7 @@ case OP_JournalMode: { /* out2 */
** file. An EXCLUSIVE lock may still be held on the database file
** after a successful return.
*/
- rc = sqlite3PagerCloseWal(pPager);
+ rc = sqlite3PagerCloseWal(pPager, db);
if( rc==SQLITE_OK ){
sqlite3PagerSetJournalMode(pPager, eNew);
}
@@ -84058,7 +84802,7 @@ case OP_VUpdate: {
sqlite3VtabImportErrmsg(p, pVtab);
if( rc==SQLITE_OK && pOp->p1 ){
assert( nArg>1 && apArg[0] && (apArg[0]->flags&MEM_Null) );
- db->lastRowid = lastRowid = rowid;
+ db->lastRowid = rowid;
}
if( (rc&0xff)==SQLITE_CONSTRAINT && pOp->p4.pVtab->bConstraint ){
if( pOp->p5==OE_Ignore ){
@@ -84294,7 +85038,6 @@ abort_due_to_error:
** release the mutexes on btrees that were acquired at the
** top. */
vdbe_return:
- db->lastRowid = lastRowid;
testcase( nVmStep>0 );
p->aCounter[SQLITE_STMTSTATUS_VM_STEP] += (int)nVmStep;
sqlite3VdbeLeave(p);
@@ -84358,10 +85101,9 @@ abort_due_to_interrupt:
*/
typedef struct Incrblob Incrblob;
struct Incrblob {
- int flags; /* Copy of "flags" passed to sqlite3_blob_open() */
int nByte; /* Size of open blob, in bytes */
int iOffset; /* Byte offset of blob in cursor data */
- int iCol; /* Table column this handle is open on */
+ u16 iCol; /* Table column this handle is open on */
BtCursor *pCsr; /* Cursor pointing at blob row */
sqlite3_stmt *pStmt; /* Statement holding cursor open */
sqlite3 *db; /* The associated database */
@@ -84392,17 +85134,27 @@ static int blobSeekToRow(Incrblob *p, sqlite3_int64 iRow, char **pzErr){
char *zErr = 0; /* Error message */
Vdbe *v = (Vdbe *)p->pStmt;
- /* Set the value of the SQL statements only variable to integer iRow.
- ** This is done directly instead of using sqlite3_bind_int64() to avoid
- ** triggering asserts related to mutexes.
+ /* Set the value of register r[1] in the SQL statement to integer iRow.
+ ** This is done directly as a performance optimization
*/
- assert( v->aVar[0].flags&MEM_Int );
- v->aVar[0].u.i = iRow;
+ v->aMem[1].flags = MEM_Int;
+ v->aMem[1].u.i = iRow;
- rc = sqlite3_step(p->pStmt);
+ /* If the statement has been run before (and is paused at the OP_ResultRow)
+ ** then back it up to the point where it does the OP_SeekRowid. This could
+ ** have been done with an extra OP_Goto, but simply setting the program
+ ** counter is faster. */
+ if( v->pc>3 ){
+ v->pc = 3;
+ rc = sqlite3VdbeExec(v);
+ }else{
+ rc = sqlite3_step(p->pStmt);
+ }
if( rc==SQLITE_ROW ){
VdbeCursor *pC = v->apCsr[0];
- u32 type = pC->aType[p->iCol];
+ u32 type = pC->nHdrParsed>p->iCol ? pC->aType[p->iCol] : 0;
+ testcase( pC->nHdrParsed==p->iCol );
+ testcase( pC->nHdrParsed==p->iCol+1 );
if( type<12 ){
zErr = sqlite3MPrintf(p->db, "cannot open value of type %s",
type==0?"null": type==7?"real": "integer"
@@ -84447,7 +85199,7 @@ SQLITE_API int sqlite3_blob_open(
const char *zTable, /* The table containing the blob */
const char *zColumn, /* The column containing the blob */
sqlite_int64 iRow, /* The row containing the glob */
- int flags, /* True -> read/write access, false -> read-only */
+ int wrFlag, /* True -> read/write access, false -> read-only */
sqlite3_blob **ppBlob /* Handle for accessing the blob returned here */
){
int nAttempt = 0;
@@ -84469,7 +85221,7 @@ SQLITE_API int sqlite3_blob_open(
return SQLITE_MISUSE_BKPT;
}
#endif
- flags = !!flags; /* flags = (flags ? 1 : 0); */
+ wrFlag = !!wrFlag; /* wrFlag = (wrFlag ? 1 : 0); */
sqlite3_mutex_enter(db->mutex);
@@ -84529,9 +85281,8 @@ SQLITE_API int sqlite3_blob_open(
/* If the value is being opened for writing, check that the
** column is not indexed, and that it is not part of a foreign key.
- ** It is against the rules to open a column to which either of these
- ** descriptions applies for writing. */
- if( flags ){
+ */
+ if( wrFlag ){
const char *zFault = 0;
Index *pIdx;
#ifndef SQLITE_OMIT_FOREIGN_KEY
@@ -84592,19 +85343,17 @@ SQLITE_API int sqlite3_blob_open(
static const VdbeOpList openBlob[] = {
{OP_TableLock, 0, 0, 0}, /* 0: Acquire a read or write lock */
{OP_OpenRead, 0, 0, 0}, /* 1: Open a cursor */
- {OP_Variable, 1, 1, 0}, /* 2: Move ?1 into reg[1] */
- {OP_NotExists, 0, 7, 1}, /* 3: Seek the cursor */
- {OP_Column, 0, 0, 1}, /* 4 */
- {OP_ResultRow, 1, 0, 0}, /* 5 */
- {OP_Goto, 0, 2, 0}, /* 6 */
- {OP_Close, 0, 0, 0}, /* 7 */
- {OP_Halt, 0, 0, 0}, /* 8 */
+ /* blobSeekToRow() will initialize r[1] to the desired rowid */
+ {OP_NotExists, 0, 5, 1}, /* 2: Seek the cursor to rowid=r[1] */
+ {OP_Column, 0, 0, 1}, /* 3 */
+ {OP_ResultRow, 1, 0, 0}, /* 4 */
+ {OP_Halt, 0, 0, 0}, /* 5 */
};
Vdbe *v = (Vdbe *)pBlob->pStmt;
int iDb = sqlite3SchemaToIndex(db, pTab->pSchema);
VdbeOp *aOp;
- sqlite3VdbeAddOp4Int(v, OP_Transaction, iDb, flags,
+ sqlite3VdbeAddOp4Int(v, OP_Transaction, iDb, wrFlag,
pTab->pSchema->schema_cookie,
pTab->pSchema->iGeneration);
sqlite3VdbeChangeP5(v, 1);
@@ -84621,7 +85370,7 @@ SQLITE_API int sqlite3_blob_open(
#else
aOp[0].p1 = iDb;
aOp[0].p2 = pTab->tnum;
- aOp[0].p3 = flags;
+ aOp[0].p3 = wrFlag;
sqlite3VdbeChangeP4(v, 1, pTab->zName, P4_TRANSIENT);
}
if( db->mallocFailed==0 ){
@@ -84629,7 +85378,7 @@ SQLITE_API int sqlite3_blob_open(
/* Remove either the OP_OpenWrite or OpenRead. Set the P2
** parameter of the other to pTab->tnum. */
- if( flags ) aOp[1].opcode = OP_OpenWrite;
+ if( wrFlag ) aOp[1].opcode = OP_OpenWrite;
aOp[1].p2 = pTab->tnum;
aOp[1].p3 = iDb;
@@ -84642,23 +85391,21 @@ SQLITE_API int sqlite3_blob_open(
*/
aOp[1].p4type = P4_INT32;
aOp[1].p4.i = pTab->nCol+1;
- aOp[4].p2 = pTab->nCol;
+ aOp[3].p2 = pTab->nCol;
- pParse->nVar = 1;
+ pParse->nVar = 0;
pParse->nMem = 1;
pParse->nTab = 1;
sqlite3VdbeMakeReady(v, pParse);
}
}
- pBlob->flags = flags;
pBlob->iCol = iCol;
pBlob->db = db;
sqlite3BtreeLeaveAll(db);
if( db->mallocFailed ){
goto blob_open_out;
}
- sqlite3_bind_int64(pBlob->pStmt, 1, iRow);
rc = blobSeekToRow(pBlob, iRow, &zErr);
} while( (++nAttempt)<SQLITE_MAX_SCHEMA_RETRY && rc==SQLITE_SCHEMA );
@@ -84776,7 +85523,7 @@ static int blobReadWrite(
** Read data from a blob handle.
*/
SQLITE_API int sqlite3_blob_read(sqlite3_blob *pBlob, void *z, int n, int iOffset){
- return blobReadWrite(pBlob, z, n, iOffset, sqlite3BtreeData);
+ return blobReadWrite(pBlob, z, n, iOffset, sqlite3BtreePayloadChecked);
}
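For reference, the wrFlag semantics and sqlite3_blob_read() above are exercised through the ordinary public API; a minimal caller-side sketch, where the table and column names are purely illustrative:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_blob *pBlob;
  char buf[5];
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t1(content BLOB);"
                   "INSERT INTO t1(rowid,content) VALUES(1, x'68656c6c6f');",
               0, 0, 0);
  /* wrFlag==0 opens the handle read-only; pass 1 for read/write access */
  if( sqlite3_blob_open(db, "main", "t1", "content", 1, 0, &pBlob)==SQLITE_OK ){
    sqlite3_blob_read(pBlob, buf, sizeof(buf), 0);  /* 5 bytes at offset 0 */
    printf("%.5s\n", buf);                          /* prints "hello" */
    sqlite3_blob_close(pBlob);
  }
  sqlite3_close(db);
  return 0;
}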
/*
@@ -85802,7 +86549,7 @@ SQLITE_PRIVATE int sqlite3VdbeSorterInit(
}
#endif
- assert( pCsr->pKeyInfo && pCsr->pBt==0 );
+ assert( pCsr->pKeyInfo && pCsr->pBtx==0 );
assert( pCsr->eCurType==CURTYPE_SORTER );
szKeyInfo = sizeof(KeyInfo) + (pCsr->pKeyInfo->nField-1)*sizeof(CollSeq*);
sz = sizeof(VdbeSorter) + nWorker * sizeof(SortSubtask);
@@ -86170,12 +86917,8 @@ static int vdbeSorterOpenTempFile(
*/
static int vdbeSortAllocUnpacked(SortSubtask *pTask){
if( pTask->pUnpacked==0 ){
- char *pFree;
- pTask->pUnpacked = sqlite3VdbeAllocUnpackedRecord(
- pTask->pSorter->pKeyInfo, 0, 0, &pFree
- );
- assert( pTask->pUnpacked==(UnpackedRecord*)pFree );
- if( pFree==0 ) return SQLITE_NOMEM_BKPT;
+ pTask->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pTask->pSorter->pKeyInfo);
+ if( pTask->pUnpacked==0 ) return SQLITE_NOMEM_BKPT;
pTask->pUnpacked->nField = pTask->pSorter->pKeyInfo->nField;
pTask->pUnpacked->errCode = 0;
}
@@ -87576,9 +88319,7 @@ SQLITE_PRIVATE int sqlite3VdbeSorterCompare(
r2 = pSorter->pUnpacked;
pKeyInfo = pCsr->pKeyInfo;
if( r2==0 ){
- char *p;
- r2 = pSorter->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pKeyInfo,0,0,&p);
- assert( pSorter->pUnpacked==(UnpackedRecord*)p );
+ r2 = pSorter->pUnpacked = sqlite3VdbeAllocUnpackedRecord(pKeyInfo);
if( r2==0 ) return SQLITE_NOMEM_BKPT;
r2->nField = nKeyCol;
}
@@ -88201,8 +88942,6 @@ SQLITE_PRIVATE int sqlite3WalkSelect(Walker *pWalker, Select *p){
** table and column.
*/
/* #include "sqliteInt.h" */
-/* #include <stdlib.h> */
-/* #include <string.h> */
/*
** Walk the expression tree pExpr and increase the aggregate function
@@ -88586,6 +89325,10 @@ static int lookupName(
sqlite3ErrorMsg(pParse, "misuse of aliased aggregate %s", zAs);
return WRC_Abort;
}
+ if( sqlite3ExprVectorSize(pOrig)!=1 ){
+ sqlite3ErrorMsg(pParse, "row value misused");
+ return WRC_Abort;
+ }
resolveAlias(pParse, pEList, j, pExpr, "", nSubquery);
cnt = 1;
pMatch = 0;
@@ -88962,6 +89705,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
notValid(pParse, pNC, "parameters", NC_IsCheck|NC_PartIdx|NC_IdxExpr);
break;
}
+ case TK_BETWEEN:
case TK_EQ:
case TK_NE:
case TK_LT:
@@ -88972,10 +89716,17 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
case TK_ISNOT: {
int nLeft, nRight;
if( pParse->db->mallocFailed ) break;
- assert( pExpr->pRight!=0 );
assert( pExpr->pLeft!=0 );
nLeft = sqlite3ExprVectorSize(pExpr->pLeft);
- nRight = sqlite3ExprVectorSize(pExpr->pRight);
+ if( pExpr->op==TK_BETWEEN ){
+ nRight = sqlite3ExprVectorSize(pExpr->x.pList->a[0].pExpr);
+ if( nRight==nLeft ){
+ nRight = sqlite3ExprVectorSize(pExpr->x.pList->a[1].pExpr);
+ }
+ }else{
+ assert( pExpr->pRight!=0 );
+ nRight = sqlite3ExprVectorSize(pExpr->pRight);
+ }
if( nLeft!=nRight ){
testcase( pExpr->op==TK_EQ );
testcase( pExpr->op==TK_NE );
@@ -88985,6 +89736,7 @@ static int resolveExprStep(Walker *pWalker, Expr *pExpr){
testcase( pExpr->op==TK_GE );
testcase( pExpr->op==TK_IS );
testcase( pExpr->op==TK_ISNOT );
+ testcase( pExpr->op==TK_BETWEEN );
sqlite3ErrorMsg(pParse, "row value misused");
}
break;
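The effect of adding TK_BETWEEN to this size check is visible at prepare time; a small sketch with purely illustrative SQL:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  /* LHS is a 2-wide row value but the upper bound is a scalar */
  if( sqlite3_prepare_v2(db, "SELECT (1,2) BETWEEN (1,1) AND 3;", -1,
                         &pStmt, 0)!=SQLITE_OK ){
    printf("%s\n", sqlite3_errmsg(db));  /* expected: row value misused */
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}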
@@ -89948,7 +90700,7 @@ static char comparisonAffinity(Expr *pExpr){
aff = sqlite3CompareAffinity(pExpr->pRight, aff);
}else if( ExprHasProperty(pExpr, EP_xIsSelect) ){
aff = sqlite3CompareAffinity(pExpr->x.pSelect->pEList->a[0].pExpr, aff);
- }else if( NEVER(aff==0) ){
+ }else if( aff==0 ){
aff = SQLITE_AFF_BLOB;
}
return aff;
@@ -90131,9 +90883,10 @@ SQLITE_PRIVATE Expr *sqlite3ExprForVectorField(
assert( pVector->flags & EP_xIsSelect );
/* The TK_SELECT_COLUMN Expr node:
**
- ** pLeft: pVector containing TK_SELECT
+ ** pLeft: pVector containing TK_SELECT. Not deleted.
** pRight: not used. But recursively deleted.
** iColumn: Index of a column in pVector
+ ** iTable: 0 or the number of columns on the LHS of an assignment
  ** pLeft->iTable: First in an array of registers holding result, or 0
** if the result is not yet computed.
**
@@ -90144,7 +90897,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprForVectorField(
** with the same pLeft pointer to the pVector, but only one of them
** will own the pVector.
*/
- pRet = sqlite3PExpr(pParse, TK_SELECT_COLUMN, 0, 0, 0);
+ pRet = sqlite3PExpr(pParse, TK_SELECT_COLUMN, 0, 0);
if( pRet ){
pRet->iColumn = iField;
pRet->pLeft = pVector;
@@ -90244,7 +90997,10 @@ static void codeVectorCompare(
u8 opx = op;
int addrDone = sqlite3VdbeMakeLabel(v);
- assert( nLeft==sqlite3ExprVectorSize(pRight) );
+ if( nLeft!=sqlite3ExprVectorSize(pRight) ){
+ sqlite3ErrorMsg(pParse, "row value misused");
+ return;
+ }
assert( pExpr->op==TK_EQ || pExpr->op==TK_NE
|| pExpr->op==TK_IS || pExpr->op==TK_ISNOT
|| pExpr->op==TK_LT || pExpr->op==TK_GT
@@ -90536,15 +91292,19 @@ SQLITE_PRIVATE Expr *sqlite3PExpr(
Parse *pParse, /* Parsing context */
int op, /* Expression opcode */
Expr *pLeft, /* Left operand */
- Expr *pRight, /* Right operand */
- const Token *pToken /* Argument token */
+ Expr *pRight /* Right operand */
){
Expr *p;
if( op==TK_AND && pParse->nErr==0 ){
/* Take advantage of short-circuit false optimization for AND */
p = sqlite3ExprAnd(pParse->db, pLeft, pRight);
}else{
- p = sqlite3ExprAlloc(pParse->db, op & TKFLG_MASK, pToken, 1);
+ p = sqlite3DbMallocRawNN(pParse->db, sizeof(Expr));
+ if( p ){
+ memset(p, 0, sizeof(Expr));
+ p->op = op & TKFLG_MASK;
+ p->iAgg = -1;
+ }
sqlite3ExprAttachSubtrees(pParse->db, p, pLeft, pRight);
}
if( p ) {
@@ -90647,7 +91407,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse *pParse, ExprList *pList, Token *
** variable number.
**
** Wildcards of the form "?nnn" are assigned the number "nnn". We make
-** sure "nnn" is not too be to avoid a denial of service attack when
+** sure "nnn" is not too big to avoid a denial of service attack when
** the SQL statement comes from an external source.
**
** Wildcards of the form ":aaa", "@aaa", or "$aaa" are assigned the same number
@@ -90658,6 +91418,7 @@ SQLITE_PRIVATE Expr *sqlite3ExprFunction(Parse *pParse, ExprList *pList, Token *
SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n){
sqlite3 *db = pParse->db;
const char *z;
+ ynVar x;
if( pExpr==0 ) return;
assert( !ExprHasProperty(pExpr, EP_IntValue|EP_Reduced|EP_TokenOnly) );
@@ -90668,15 +91429,20 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n
if( z[1]==0 ){
/* Wildcard of the form "?". Assign the next variable number */
assert( z[0]=='?' );
- pExpr->iColumn = (ynVar)(++pParse->nVar);
+ x = (ynVar)(++pParse->nVar);
}else{
- ynVar x;
+ int doAdd = 0;
if( z[0]=='?' ){
/* Wildcard of the form "?nnn". Convert "nnn" to an integer and
** use it as the variable number */
i64 i;
- int bOk = 0==sqlite3Atoi64(&z[1], &i, n-1, SQLITE_UTF8);
- x = (ynVar)i;
+ int bOk;
+ if( n==2 ){ /*OPTIMIZATION-IF-TRUE*/
+ i = z[1]-'0'; /* The common case of ?N for a single digit N */
+ bOk = 1;
+ }else{
+ bOk = 0==sqlite3Atoi64(&z[1], &i, n-1, SQLITE_UTF8);
+ }
testcase( i==0 );
testcase( i==1 );
testcase( i==db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]-1 );
@@ -90686,40 +91452,30 @@ SQLITE_PRIVATE void sqlite3ExprAssignVarNumber(Parse *pParse, Expr *pExpr, u32 n
db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER]);
return;
}
- if( i>pParse->nVar ){
- pParse->nVar = (int)i;
+ x = (ynVar)i;
+ if( x>pParse->nVar ){
+ pParse->nVar = (int)x;
+ doAdd = 1;
+ }else if( sqlite3VListNumToName(pParse->pVList, x)==0 ){
+ doAdd = 1;
}
}else{
/* Wildcards like ":aaa", "$aaa" or "@aaa". Reuse the same variable
** number as the prior appearance of the same name, or if the name
** has never appeared before, reuse the same variable number
*/
- ynVar i;
- for(i=x=0; i<pParse->nzVar; i++){
- if( pParse->azVar[i] && strcmp(pParse->azVar[i],z)==0 ){
- x = (ynVar)i+1;
- break;
- }
- }
- if( x==0 ) x = (ynVar)(++pParse->nVar);
- }
- pExpr->iColumn = x;
- if( x>pParse->nzVar ){
- char **a;
- a = sqlite3DbRealloc(db, pParse->azVar, x*sizeof(a[0]));
- if( a==0 ){
- assert( db->mallocFailed ); /* Error reported through mallocFailed */
- return;
+ x = (ynVar)sqlite3VListNameToNum(pParse->pVList, z, n);
+ if( x==0 ){
+ x = (ynVar)(++pParse->nVar);
+ doAdd = 1;
}
- pParse->azVar = a;
- memset(&a[pParse->nzVar], 0, (x-pParse->nzVar)*sizeof(a[0]));
- pParse->nzVar = x;
}
- if( pParse->azVar[x-1]==0 ){
- pParse->azVar[x-1] = sqlite3DbStrNDup(db, z, n);
+ if( doAdd ){
+ pParse->pVList = sqlite3VListAdd(db, pParse->pVList, z, n, x);
}
- }
- if( pParse->nVar>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){
+ }
+ pExpr->iColumn = x;
+ if( x>db->aLimit[SQLITE_LIMIT_VARIABLE_NUMBER] ){
sqlite3ErrorMsg(pParse, "too many SQL variables");
}
}
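The numbering rules implemented here are observable through the public bind-parameter interfaces; a short sketch (the statement text is illustrative):

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  /* "?5" pins number 5, ":name" takes the next number (6), "?" the next (7),
  ** and the second ":name" reuses 6. */
  sqlite3_prepare_v2(db, "SELECT ?5, :name, ?, :name;", -1, &pStmt, 0);
  printf("largest parameter number: %d\n", sqlite3_bind_parameter_count(pStmt));
  printf(":name is parameter %d\n",
         sqlite3_bind_parameter_index(pStmt, ":name"));
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}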
@@ -90808,7 +91564,7 @@ static int dupedExprStructSize(Expr *p, int flags){
assert( flags==EXPRDUP_REDUCE || flags==0 ); /* Only one flag value allowed */
assert( EXPR_FULLSIZE<=0xfff );
assert( (0xfff & (EP_Reduced|EP_TokenOnly))==0 );
- if( 0==flags ){
+ if( 0==flags || p->op==TK_SELECT_COLUMN ){
nSize = EXPR_FULLSIZE;
}else{
assert( !ExprHasProperty(p, EP_TokenOnly|EP_Reduced) );
@@ -90951,6 +91707,8 @@ static Expr *exprDup(sqlite3 *db, Expr *p, int dupFlags, u8 **pzBuffer){
if( !ExprHasProperty(p, EP_TokenOnly|EP_Leaf) ){
if( pNew->op==TK_SELECT_COLUMN ){
pNew->pLeft = p->pLeft;
+ assert( p->iColumn==0 || p->pRight==0 );
+ assert( p->pRight==0 || p->pRight==p->pLeft );
}else{
pNew->pLeft = sqlite3ExprDup(db, p->pLeft, 0);
}
@@ -91013,6 +91771,7 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags)
ExprList *pNew;
struct ExprList_item *pItem, *pOldItem;
int i;
+ Expr *pPriorSelectCol = 0;
assert( db!=0 );
if( p==0 ) return 0;
pNew = sqlite3DbMallocRawNN(db, sizeof(*pNew) );
@@ -91027,7 +91786,24 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListDup(sqlite3 *db, ExprList *p, int flags)
pOldItem = p->a;
for(i=0; i<p->nExpr; i++, pItem++, pOldItem++){
Expr *pOldExpr = pOldItem->pExpr;
+ Expr *pNewExpr;
pItem->pExpr = sqlite3ExprDup(db, pOldExpr, flags);
+ if( pOldExpr
+ && pOldExpr->op==TK_SELECT_COLUMN
+ && (pNewExpr = pItem->pExpr)!=0
+ ){
+ assert( pNewExpr->iColumn==0 || i>0 );
+ if( pNewExpr->iColumn==0 ){
+ assert( pOldExpr->pLeft==pOldExpr->pRight );
+ pPriorSelectCol = pNewExpr->pLeft = pNewExpr->pRight;
+ }else{
+ assert( i>0 );
+ assert( pItem[-1].pExpr!=0 );
+ assert( pNewExpr->iColumn==pItem[-1].pExpr->iColumn+1 );
+ assert( pPriorSelectCol==pItem[-1].pExpr->pLeft );
+ pNewExpr->pLeft = pPriorSelectCol;
+ }
+ }
pItem->zName = sqlite3DbStrDup(db, pOldItem->zName);
pItem->zSpan = sqlite3DbStrDup(db, pOldItem->zSpan);
pItem->sortOrder = pOldItem->sortOrder;
@@ -91078,7 +91854,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListDup(sqlite3 *db, SrcList *p, int flags){
}
pTab = pNewItem->pTab = pOldItem->pTab;
if( pTab ){
- pTab->nRef++;
+ pTab->nTabRef++;
}
pNewItem->pSelect = sqlite3SelectDup(db, pOldItem->pSelect, flags);
pNewItem->pOn = sqlite3ExprDup(db, pOldItem->pOn, flags);
@@ -91111,33 +91887,41 @@ SQLITE_PRIVATE IdList *sqlite3IdListDup(sqlite3 *db, IdList *p){
}
return pNew;
}
-SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *p, int flags){
- Select *pNew, *pPrior;
+SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *pDup, int flags){
+ Select *pRet = 0;
+ Select *pNext = 0;
+ Select **pp = &pRet;
+ Select *p;
+
assert( db!=0 );
- if( p==0 ) return 0;
- pNew = sqlite3DbMallocRawNN(db, sizeof(*p) );
- if( pNew==0 ) return 0;
- pNew->pEList = sqlite3ExprListDup(db, p->pEList, flags);
- pNew->pSrc = sqlite3SrcListDup(db, p->pSrc, flags);
- pNew->pWhere = sqlite3ExprDup(db, p->pWhere, flags);
- pNew->pGroupBy = sqlite3ExprListDup(db, p->pGroupBy, flags);
- pNew->pHaving = sqlite3ExprDup(db, p->pHaving, flags);
- pNew->pOrderBy = sqlite3ExprListDup(db, p->pOrderBy, flags);
- pNew->op = p->op;
- pNew->pPrior = pPrior = sqlite3SelectDup(db, p->pPrior, flags);
- if( pPrior ) pPrior->pNext = pNew;
- pNew->pNext = 0;
- pNew->pLimit = sqlite3ExprDup(db, p->pLimit, flags);
- pNew->pOffset = sqlite3ExprDup(db, p->pOffset, flags);
- pNew->iLimit = 0;
- pNew->iOffset = 0;
- pNew->selFlags = p->selFlags & ~SF_UsesEphemeral;
- pNew->addrOpenEphm[0] = -1;
- pNew->addrOpenEphm[1] = -1;
- pNew->nSelectRow = p->nSelectRow;
- pNew->pWith = withDup(db, p->pWith);
- sqlite3SelectSetName(pNew, p->zSelName);
- return pNew;
+ for(p=pDup; p; p=p->pPrior){
+ Select *pNew = sqlite3DbMallocRawNN(db, sizeof(*p) );
+ if( pNew==0 ) break;
+ pNew->pEList = sqlite3ExprListDup(db, p->pEList, flags);
+ pNew->pSrc = sqlite3SrcListDup(db, p->pSrc, flags);
+ pNew->pWhere = sqlite3ExprDup(db, p->pWhere, flags);
+ pNew->pGroupBy = sqlite3ExprListDup(db, p->pGroupBy, flags);
+ pNew->pHaving = sqlite3ExprDup(db, p->pHaving, flags);
+ pNew->pOrderBy = sqlite3ExprListDup(db, p->pOrderBy, flags);
+ pNew->op = p->op;
+ pNew->pNext = pNext;
+ pNew->pPrior = 0;
+ pNew->pLimit = sqlite3ExprDup(db, p->pLimit, flags);
+ pNew->pOffset = sqlite3ExprDup(db, p->pOffset, flags);
+ pNew->iLimit = 0;
+ pNew->iOffset = 0;
+ pNew->selFlags = p->selFlags & ~SF_UsesEphemeral;
+ pNew->addrOpenEphm[0] = -1;
+ pNew->addrOpenEphm[1] = -1;
+ pNew->nSelectRow = p->nSelectRow;
+ pNew->pWith = withDup(db, p->pWith);
+ sqlite3SelectSetName(pNew, p->zSelName);
+ *pp = pNew;
+ pp = &pNew->pPrior;
+ pNext = pNew;
+ }
+
+ return pRet;
}
#else
SQLITE_PRIVATE Select *sqlite3SelectDup(sqlite3 *db, Select *p, int flags){
@@ -91202,7 +91986,7 @@ no_mem:
** Or: (a,b,c) = (SELECT x,y,z FROM ....)
**
** For each term of the vector assignment, append new entries to the
-** expression list pList. In the case of a subquery on the LHS, append
+** expression list pList. In the case of a subquery on the RHS, append
** TK_SELECT_COLUMN expressions.
*/
SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector(
@@ -91219,13 +92003,19 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector(
** exit prior to this routine being invoked */
if( NEVER(pColumns==0) ) goto vector_append_error;
if( pExpr==0 ) goto vector_append_error;
- n = sqlite3ExprVectorSize(pExpr);
- if( pColumns->nId!=n ){
+
+ /* If the RHS is a vector, then we can immediately check to see that
+ ** the size of the RHS and LHS match. But if the RHS is a SELECT,
+ ** wildcards ("*") in the result set of the SELECT must be expanded before
+ ** we can do the size check, so defer the size check until code generation.
+ */
+ if( pExpr->op!=TK_SELECT && pColumns->nId!=(n=sqlite3ExprVectorSize(pExpr)) ){
sqlite3ErrorMsg(pParse, "%d columns assigned %d values",
pColumns->nId, n);
goto vector_append_error;
}
- for(i=0; i<n; i++){
+
+ for(i=0; i<pColumns->nId; i++){
Expr *pSubExpr = sqlite3ExprForVectorField(pParse, pExpr, i);
pList = sqlite3ExprListAppend(pParse, pList, pSubExpr);
if( pList ){
@@ -91234,11 +92024,20 @@ SQLITE_PRIVATE ExprList *sqlite3ExprListAppendVector(
pColumns->a[i].zName = 0;
}
}
+
if( pExpr->op==TK_SELECT ){
if( pList && pList->a[iFirst].pExpr ){
- assert( pList->a[iFirst].pExpr->op==TK_SELECT_COLUMN );
- pList->a[iFirst].pExpr->pRight = pExpr;
+ Expr *pFirst = pList->a[iFirst].pExpr;
+ assert( pFirst->op==TK_SELECT_COLUMN );
+
+ /* Store the SELECT statement in pRight so it will be deleted when
+ ** sqlite3ExprListDelete() is called */
+ pFirst->pRight = pExpr;
pExpr = 0;
+
+ /* Remember the size of the LHS in iTable so that we can check that
+ ** the RHS and LHS sizes match during code generation. */
+ pFirst->iTable = pColumns->nId;
}
}
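Because the size check is deferred, a mismatch with a SELECT on the RHS is still reported, just during code generation; a sketch with an illustrative table:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  sqlite3 *db;
  sqlite3_stmt *pStmt = 0;
  sqlite3_open(":memory:", &db);
  sqlite3_exec(db, "CREATE TABLE t1(a,b);", 0, 0, 0);
  if( sqlite3_prepare_v2(db, "UPDATE t1 SET (a,b)=(SELECT 1,2,3);", -1,
                         &pStmt, 0)!=SQLITE_OK ){
    printf("%s\n", sqlite3_errmsg(db));  /* expected: 2 columns assigned 3 values */
  }
  sqlite3_finalize(pStmt);
  sqlite3_close(db);
  return 0;
}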
@@ -92069,6 +92868,28 @@ SQLITE_PRIVATE void sqlite3SubselectError(Parse *pParse, int nActual, int nExpec
#endif
/*
+** Expression pExpr is a vector that has been used in a context where
+** it is not permitted. If pExpr is a sub-select vector, this routine
+** loads the Parse object with a message of the form:
+**
+** "sub-select returns N columns - expected 1"
+**
+** Or, if it is a regular scalar vector:
+**
+** "row value misused"
+*/
+SQLITE_PRIVATE void sqlite3VectorErrorMsg(Parse *pParse, Expr *pExpr){
+#ifndef SQLITE_OMIT_SUBQUERY
+ if( pExpr->flags & EP_xIsSelect ){
+ sqlite3SubselectError(pParse, pExpr->x.pSelect->pEList->nExpr, 1);
+ }else
+#endif
+ {
+ sqlite3ErrorMsg(pParse, "row value misused");
+ }
+}
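Both diagnostics described above surface at prepare time; a small sketch with illustrative SQL:

#include <stdio.h>
#include "sqlite3.h"

static void tryStmt(sqlite3 *db, const char *zSql){
  sqlite3_stmt *pStmt = 0;
  if( sqlite3_prepare_v2(db, zSql, -1, &pStmt, 0)!=SQLITE_OK ){
    printf("%s -> %s\n", zSql, sqlite3_errmsg(db));
  }
  sqlite3_finalize(pStmt);
}

int main(void){
  sqlite3 *db;
  sqlite3_open(":memory:", &db);
  tryStmt(db, "SELECT (SELECT 1,2);"); /* sub-select returns 2 columns - expected 1 */
  tryStmt(db, "SELECT (1,2);");        /* row value misused */
  sqlite3_close(db);
  return 0;
}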
+
+/*
** Generate code for scalar subqueries used as a subquery expression, EXISTS,
** or IN operators. Examples:
**
@@ -92255,7 +93076,7 @@ SQLITE_PRIVATE int sqlite3CodeSubselect(
}else{
sqlite3VdbeAddOp4(v, OP_MakeRecord, r3, 1, r2, &affinity, 1);
sqlite3ExprCacheAffinityChange(pParse, r3, 1);
- sqlite3VdbeAddOp2(v, OP_IdxInsert, pExpr->iTable, r2);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pExpr->iTable, r2, r3, 1);
}
}
}
@@ -92350,11 +93171,7 @@ SQLITE_PRIVATE int sqlite3ExprCheckIN(Parse *pParse, Expr *pIn){
return 1;
}
}else if( nVector!=1 ){
- if( (pIn->pLeft->flags & EP_xIsSelect) ){
- sqlite3SubselectError(pParse, nVector, 1);
- }else{
- sqlite3ErrorMsg(pParse, "row value misused");
- }
+ sqlite3VectorErrorMsg(pParse, pIn->pLeft);
return 1;
}
return 0;
@@ -92659,22 +93476,22 @@ static void codeInteger(Parse *pParse, Expr *pExpr, int negFlag, int iMem){
const char *z = pExpr->u.zToken;
assert( z!=0 );
c = sqlite3DecOrHexToI64(z, &value);
- if( c==0 || (c==2 && negFlag) ){
- if( negFlag ){ value = c==2 ? SMALLEST_INT64 : -value; }
- sqlite3VdbeAddOp4Dup8(v, OP_Int64, 0, iMem, 0, (u8*)&value, P4_INT64);
- }else{
+ if( c==1 || (c==2 && !negFlag) || (negFlag && value==SMALLEST_INT64)){
#ifdef SQLITE_OMIT_FLOATING_POINT
sqlite3ErrorMsg(pParse, "oversized integer: %s%s", negFlag ? "-" : "", z);
#else
#ifndef SQLITE_OMIT_HEX_INTEGER
if( sqlite3_strnicmp(z,"0x",2)==0 ){
- sqlite3ErrorMsg(pParse, "hex literal too big: %s", z);
+ sqlite3ErrorMsg(pParse, "hex literal too big: %s%s", negFlag?"-":"",z);
}else
#endif
{
codeReal(v, z, negFlag, iMem);
}
#endif
+ }else{
+ if( negFlag ){ value = c==2 ? SMALLEST_INT64 : -value; }
+ sqlite3VdbeAddOp4Dup8(v, OP_Int64, 0, iMem, 0, (u8*)&value, P4_INT64);
}
}
}
@@ -93013,7 +93830,7 @@ static int exprCodeVector(Parse *pParse, Expr *p, int *piFreeable){
iResult = pParse->nMem+1;
pParse->nMem += nResult;
for(i=0; i<nResult; i++){
- sqlite3ExprCode(pParse, p->x.pList->a[i].pExpr, i+iResult);
+ sqlite3ExprCodeFactorable(pParse, p->x.pList->a[i].pExpr, i+iResult);
}
}
}
@@ -93125,9 +93942,10 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
assert( pExpr->u.zToken[0]!=0 );
sqlite3VdbeAddOp2(v, OP_Variable, pExpr->iColumn, target);
if( pExpr->u.zToken[1]!=0 ){
- assert( pExpr->u.zToken[0]=='?'
- || strcmp(pExpr->u.zToken, pParse->azVar[pExpr->iColumn-1])==0 );
- sqlite3VdbeChangeP4(v, -1, pParse->azVar[pExpr->iColumn-1], P4_STATIC);
+ const char *z = sqlite3VListNumToName(pParse->pVList, pExpr->iColumn);
+ assert( pExpr->u.zToken[0]=='?' || strcmp(pExpr->u.zToken, z)==0 );
+ pParse->pVList[0] = 0; /* Indicate VList may no longer be enlarged */
+ sqlite3VdbeAppendP4(v, (char*)z, P4_STATIC);
}
return target;
}
@@ -93277,6 +94095,11 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
u8 enc = ENC(db); /* The text encoding used by this database */
CollSeq *pColl = 0; /* A collating sequence */
+ if( ConstFactorOk(pParse) && sqlite3ExprIsConstantNotJoin(pExpr) ){
+ /* SQL functions can be expensive. So try to move constant functions
+ ** out of the inner loop, even if that means an extra OP_Copy. */
+ return sqlite3ExprCodeAtInit(pParse, pExpr, -1);
+ }
assert( !ExprHasProperty(pExpr, EP_xIsSelect) );
if( ExprHasProperty(pExpr, EP_TokenOnly) ){
pFarg = 0;
@@ -93325,6 +94148,22 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
return sqlite3ExprCodeTarget(pParse, pFarg->a[0].pExpr, target);
}
+#ifdef SQLITE_DEBUG
+ /* The AFFINITY() function evaluates to a string that describes
+ ** the type affinity of the argument. This is used for testing of
+ ** the SQLite type logic.
+ */
+ if( pDef->funcFlags & SQLITE_FUNC_AFFINITY ){
+ const char *azAff[] = { "blob", "text", "numeric", "integer", "real" };
+ char aff;
+ assert( nFarg==1 );
+ aff = sqlite3ExprAffinity(pFarg->a[0].pExpr);
+ sqlite3VdbeLoadString(v, target,
+ aff ? azAff[aff-SQLITE_AFF_BLOB] : "none");
+ return target;
+ }
+#endif
+
for(i=0; i<nFarg; i++){
if( i<32 && sqlite3ExprIsConstant(pFarg->a[i].pExpr) ){
testcase( i==31 );
@@ -93413,9 +94252,17 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
break;
}
case TK_SELECT_COLUMN: {
+ int n;
if( pExpr->pLeft->iTable==0 ){
pExpr->pLeft->iTable = sqlite3CodeSubselect(pParse, pExpr->pLeft, 0, 0);
}
+ assert( pExpr->iTable==0 || pExpr->pLeft->op==TK_SELECT );
+ if( pExpr->iTable
+ && pExpr->iTable!=(n = sqlite3ExprVectorSize(pExpr->pLeft))
+ ){
+ sqlite3ErrorMsg(pParse, "%d columns assigned %d values",
+ pExpr->iTable, n);
+ }
return pExpr->pLeft->iTable + pExpr->iColumn;
}
case TK_IN: {
@@ -93633,24 +94480,40 @@ SQLITE_PRIVATE int sqlite3ExprCodeTarget(Parse *pParse, Expr *pExpr, int target)
/*
** Factor out the code of the given expression to initialization time.
+**
+** If regDest>=0 then the result is always stored in that register and the
+** result is not reusable. If regDest<0 then this routine is free to
+** store the value whereever it wants. The register where the expression
+** is stored is returned. When regDest<0, two identical expressions will
+** code to the same register.
*/
-SQLITE_PRIVATE void sqlite3ExprCodeAtInit(
+SQLITE_PRIVATE int sqlite3ExprCodeAtInit(
Parse *pParse, /* Parsing context */
Expr *pExpr, /* The expression to code when the VDBE initializes */
- int regDest, /* Store the value in this register */
- u8 reusable /* True if this expression is reusable */
+ int regDest /* Store the value in this register */
){
ExprList *p;
assert( ConstFactorOk(pParse) );
p = pParse->pConstExpr;
+ if( regDest<0 && p ){
+ struct ExprList_item *pItem;
+ int i;
+ for(pItem=p->a, i=p->nExpr; i>0; pItem++, i--){
+ if( pItem->reusable && sqlite3ExprCompare(pItem->pExpr,pExpr,-1)==0 ){
+ return pItem->u.iConstExprReg;
+ }
+ }
+ }
pExpr = sqlite3ExprDup(pParse->db, pExpr, 0);
p = sqlite3ExprListAppend(pParse, p, pExpr);
if( p ){
struct ExprList_item *pItem = &p->a[p->nExpr-1];
+ pItem->reusable = regDest<0;
+ if( regDest<0 ) regDest = ++pParse->nMem;
pItem->u.iConstExprReg = regDest;
- pItem->reusable = reusable;
}
pParse->pConstExpr = p;
+ return regDest;
}
/*
@@ -93673,19 +94536,8 @@ SQLITE_PRIVATE int sqlite3ExprCodeTemp(Parse *pParse, Expr *pExpr, int *pReg){
&& pExpr->op!=TK_REGISTER
&& sqlite3ExprIsConstantNotJoin(pExpr)
){
- ExprList *p = pParse->pConstExpr;
- int i;
*pReg = 0;
- if( p ){
- struct ExprList_item *pItem;
- for(pItem=p->a, i=p->nExpr; i>0; pItem++, i--){
- if( pItem->reusable && sqlite3ExprCompare(pItem->pExpr,pExpr,-1)==0 ){
- return pItem->u.iConstExprReg;
- }
- }
- }
- r2 = ++pParse->nMem;
- sqlite3ExprCodeAtInit(pParse, pExpr, r2, 1);
+ r2 = sqlite3ExprCodeAtInit(pParse, pExpr, -1);
}else{
int r1 = sqlite3GetTempReg(pParse);
r2 = sqlite3ExprCodeTarget(pParse, pExpr, r1);
@@ -93739,7 +94591,7 @@ SQLITE_PRIVATE void sqlite3ExprCodeCopy(Parse *pParse, Expr *pExpr, int target){
*/
SQLITE_PRIVATE void sqlite3ExprCodeFactorable(Parse *pParse, Expr *pExpr, int target){
if( pParse->okConstFactor && sqlite3ExprIsConstant(pExpr) ){
- sqlite3ExprCodeAtInit(pParse, pExpr, target, 0);
+ sqlite3ExprCodeAtInit(pParse, pExpr, target);
}else{
sqlite3ExprCode(pParse, pExpr, target);
}
@@ -93803,10 +94655,15 @@ SQLITE_PRIVATE int sqlite3ExprCodeExprList(
if( !ConstFactorOk(pParse) ) flags &= ~SQLITE_ECEL_FACTOR;
for(pItem=pList->a, i=0; i<n; i++, pItem++){
Expr *pExpr = pItem->pExpr;
- if( (flags & SQLITE_ECEL_REF)!=0 && (j = pList->a[i].u.x.iOrderByCol)>0 ){
- sqlite3VdbeAddOp2(v, copyOp, j+srcReg-1, target+i);
+ if( (flags & SQLITE_ECEL_REF)!=0 && (j = pItem->u.x.iOrderByCol)>0 ){
+ if( flags & SQLITE_ECEL_OMITREF ){
+ i--;
+ n--;
+ }else{
+ sqlite3VdbeAddOp2(v, copyOp, j+srcReg-1, target+i);
+ }
}else if( (flags & SQLITE_ECEL_FACTOR)!=0 && sqlite3ExprIsConstant(pExpr) ){
- sqlite3ExprCodeAtInit(pParse, pExpr, target+i, 0);
+ sqlite3ExprCodeAtInit(pParse, pExpr, target+i);
}else{
int inReg = sqlite3ExprCodeTarget(pParse, pExpr, target+i);
if( inReg!=target+i ){
@@ -93879,6 +94736,11 @@ static void exprCodeBetween(
if( xJump ){
xJump(pParse, &exprAnd, dest, jumpIfNull);
}else{
+ /* Mark the expression as being from the ON or USING clause of a join
+ ** so that the sqlite3ExprCodeTarget() routine will not attempt to move
+ ** it into the Parse.pConstExpr list. We should use a new bit for this,
+ ** for clarity, but we are out of bits in the Expr.flags field so we
+ ** have to reuse the EP_FromJoin bit. Bummer. */
exprX.flags |= EP_FromJoin;
sqlite3ExprCodeTarget(pParse, &exprAnd, dest);
}
@@ -94317,11 +95179,10 @@ SQLITE_PRIVATE int sqlite3ExprImpliesExpr(Expr *pE1, Expr *pE2, int iTab){
){
return 1;
}
- if( pE2->op==TK_NOTNULL
- && sqlite3ExprCompare(pE1->pLeft, pE2->pLeft, iTab)==0
- && (pE1->op!=TK_ISNULL && pE1->op!=TK_IS)
- ){
- return 1;
+ if( pE2->op==TK_NOTNULL && pE1->op!=TK_ISNULL && pE1->op!=TK_IS ){
+ Expr *pX = sqlite3ExprSkipCollate(pE1->pLeft);
+ testcase( pX!=pE1->pLeft );
+ if( sqlite3ExprCompare(pX, pE2->pLeft, iTab)==0 ) return 1;
}
return 0;
}
@@ -95243,7 +96104,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable(
sqlite3NestedParse(pParse,
"UPDATE \"%w\".%s SET "
"sql = sqlite_rename_parent(sql, %Q, %Q) "
- "WHERE %s;", zDb, SCHEMA_TABLE(iDb), zTabName, zName, zWhere);
+ "WHERE %s;", zDb, MASTER_NAME, zTabName, zName, zWhere);
sqlite3DbFree(db, zWhere);
}
}
@@ -95267,7 +96128,7 @@ SQLITE_PRIVATE void sqlite3AlterRenameTable(
"ELSE name END "
"WHERE tbl_name=%Q COLLATE nocase AND "
"(type='table' OR type='index' OR type='trigger');",
- zDb, SCHEMA_TABLE(iDb), zName, zName, zName,
+ zDb, MASTER_NAME, zName, zName, zName,
#ifndef SQLITE_OMIT_TRIGGER
zName,
#endif
@@ -95428,7 +96289,7 @@ SQLITE_PRIVATE void sqlite3AlterFinishAddColumn(Parse *pParse, Token *pColDef){
"UPDATE \"%w\".%s SET "
"sql = substr(sql,1,%d) || ', ' || %Q || substr(sql,%d) "
"WHERE type = 'table' AND name = %Q",
- zDb, SCHEMA_TABLE(iDb), pNew->addColOffset, zCol, pNew->addColOffset+1,
+ zDb, MASTER_NAME, pNew->addColOffset, zCol, pNew->addColOffset+1,
zTab
);
sqlite3DbFree(db, zCol);
@@ -95512,7 +96373,7 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){
pNew = (Table*)sqlite3DbMallocZero(db, sizeof(Table));
if( !pNew ) goto exit_begin_add_column;
pParse->pNewTable = pNew;
- pNew->nRef = 1;
+ pNew->nTabRef = 1;
pNew->nCol = pTab->nCol;
assert( pNew->nCol>0 );
nAlloc = (((pNew->nCol-1)/8)*8)+8;
@@ -95532,7 +96393,7 @@ SQLITE_PRIVATE void sqlite3AlterBeginAddColumn(Parse *pParse, SrcList *pSrc){
}
pNew->pSchema = db->aDb[iDb].pSchema;
pNew->addColOffset = pTab->addColOffset;
- pNew->nRef = 1;
+ pNew->nTabRef = 1;
/* Begin a transaction and increment the schema cookie. */
sqlite3BeginWriteOperation(pParse, 0, iDb);
@@ -96347,6 +97208,12 @@ static const FuncDef statPushFuncdef = {
** The content to returned is determined by the parameter J
** which is one of the STAT_GET_xxxx values defined above.
**
+** The stat_get(P,J) function is not available to generic SQL. It is
+** inserted as part of a manually constructed bytecode program. (See
+** the callStatGet() routine below.) It is guaranteed that the P
+** parameter will always be a pointer to a Stat4Accum object, never a
+** NULL.
+**
** If neither STAT3 nor STAT4 are enabled, then J is always
** STAT_GET_STAT1 and is hence omitted and this routine becomes
** a one-parameter function, stat_get(P), that always returns the
@@ -97165,7 +98032,7 @@ static void initAvgEq(Index *pIdx){
}
}
- if( nDist100>nSum100 ){
+ if( nDist100>nSum100 && sumEq<nRow ){
avgEq = ((i64)100 * (nRow - sumEq))/(nDist100 - nSum100);
}
if( avgEq==0 ) avgEq = 1;
@@ -97316,7 +98183,9 @@ static int loadStatTbl(
sqlite3_finalize(pStmt);
return SQLITE_NOMEM_BKPT;
}
- memcpy(pSample->p, sqlite3_column_blob(pStmt, 4), pSample->n);
+ if( pSample->n ){
+ memcpy(pSample->p, sqlite3_column_blob(pStmt, 4), pSample->n);
+ }
pIdx->nSample++;
}
rc = sqlite3_finalize(pStmt);
@@ -97577,6 +98446,7 @@ static void attachFunc(
rc = sqlite3BtreeOpen(pVfs, zPath, db, &aNew->pBt, 0, flags);
sqlite3_free( zPath );
db->nDb++;
+ db->skipBtreeMutex = 0;
if( rc==SQLITE_CONSTRAINT ){
rc = SQLITE_ERROR;
zErrDyn = sqlite3MPrintf(db, "database is already attached");
@@ -97765,6 +98635,7 @@ static void codeAttach(
sqlite3* db = pParse->db;
int regArgs;
+ if( pParse->nErr ) goto attach_end;
memset(&sName, 0, sizeof(NameContext));
sName.pParse = pParse;
@@ -98318,10 +99189,10 @@ SQLITE_PRIVATE void sqlite3AuthContextPop(AuthContext *pContext){
** codeTableLocks() functions.
*/
struct TableLock {
- int iDb; /* The database containing the table to be locked */
- int iTab; /* The root page of the table to be locked */
- u8 isWriteLock; /* True for write lock. False for a read lock */
- const char *zName; /* Name of the table */
+ int iDb; /* The database containing the table to be locked */
+ int iTab; /* The root page of the table to be locked */
+ u8 isWriteLock; /* True for write lock. False for a read lock */
+ const char *zLockName; /* Name of the table */
};
/*
@@ -98347,6 +99218,8 @@ SQLITE_PRIVATE void sqlite3TableLock(
TableLock *p;
assert( iDb>=0 );
+ if( iDb==1 ) return;
+ if( !sqlite3BtreeSharable(pParse->db->aDb[iDb].pBt) ) return;
for(i=0; i<pToplevel->nTableLock; i++){
p = &pToplevel->aTableLock[i];
if( p->iDb==iDb && p->iTab==iTab ){
@@ -98363,7 +99236,7 @@ SQLITE_PRIVATE void sqlite3TableLock(
p->iDb = iDb;
p->iTab = iTab;
p->isWriteLock = isWriteLock;
- p->zName = zName;
+ p->zLockName = zName;
}else{
pToplevel->nTableLock = 0;
sqlite3OomFault(pToplevel->db);
@@ -98385,7 +99258,7 @@ static void codeTableLocks(Parse *pParse){
TableLock *p = &pParse->aTableLock[i];
int p1 = p->iDb;
sqlite3VdbeAddOp4(pVdbe, OP_TableLock, p1, p->iTab, p->isWriteLock,
- p->zName, P4_STATIC);
+ p->zLockName, P4_STATIC);
}
}
#else
@@ -98594,15 +99467,22 @@ SQLITE_PRIVATE Table *sqlite3FindTable(sqlite3 *db, const char *zName, const cha
return 0;
}
#endif
- for(i=OMIT_TEMPDB; i<db->nDb; i++){
- int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */
- if( zDatabase==0 || sqlite3StrICmp(zDatabase, db->aDb[j].zDbSName)==0 ){
- assert( sqlite3SchemaMutexHeld(db, j, 0) );
- p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName);
- if( p ) break;
+ while(1){
+ for(i=OMIT_TEMPDB; i<db->nDb; i++){
+ int j = (i<2) ? i^1 : i; /* Search TEMP before MAIN */
+ if( zDatabase==0 || sqlite3StrICmp(zDatabase, db->aDb[j].zDbSName)==0 ){
+ assert( sqlite3SchemaMutexHeld(db, j, 0) );
+ p = sqlite3HashFind(&db->aDb[j].pSchema->tblHash, zName);
+ if( p ) return p;
+ }
}
+ /* Not found. If the name we were looking for was temp.sqlite_master
+ ** then change the name to sqlite_temp_master and try again. */
+ if( sqlite3StrICmp(zName, MASTER_NAME)!=0 ) break;
+ if( sqlite3_stricmp(zDatabase, db->aDb[1].zDbSName)!=0 ) break;
+ zName = TEMP_MASTER_NAME;
}
- return p;
+ return 0;
}
/*
@@ -98638,6 +99518,9 @@ SQLITE_PRIVATE Table *sqlite3LocateTable(
** CREATE, then check to see if it is the name of an virtual table that
** can be an eponymous virtual table. */
Module *pMod = (Module*)sqlite3HashFind(&pParse->db->aModule, zName);
+ if( pMod==0 && sqlite3_strnicmp(zName, "pragma_", 7)==0 ){
+ pMod = sqlite3PragmaVtabRegister(pParse->db, zName);
+ }
if( pMod && sqlite3VtabEponymousTableInit(pParse, pMod) ){
return pMod->pEpoTab;
}
@@ -98920,7 +99803,7 @@ static void SQLITE_NOINLINE deleteTable(sqlite3 *db, Table *pTable){
SQLITE_PRIVATE void sqlite3DeleteTable(sqlite3 *db, Table *pTable){
/* Do not delete the table until the reference count reaches zero. */
if( !pTable ) return;
- if( ((!db || db->pnBytesFreed==0) && (--pTable->nRef)>0) ) return;
+ if( ((!db || db->pnBytesFreed==0) && (--pTable->nTabRef)>0) ) return;
deleteTable(db, pTable);
}
@@ -98974,7 +99857,7 @@ SQLITE_PRIVATE char *sqlite3NameFromToken(sqlite3 *db, Token *pName){
*/
SQLITE_PRIVATE void sqlite3OpenMasterTable(Parse *p, int iDb){
Vdbe *v = sqlite3GetVdbe(p);
- sqlite3TableLock(p, iDb, MASTER_ROOT, 1, SCHEMA_TABLE(iDb));
+ sqlite3TableLock(p, iDb, MASTER_ROOT, 1, MASTER_NAME);
sqlite3VdbeAddOp4Int(v, OP_OpenWrite, 0, MASTER_ROOT, iDb, 5);
if( p->nTab==0 ){
p->nTab = 1;
@@ -98992,7 +99875,10 @@ SQLITE_PRIVATE int sqlite3FindDbName(sqlite3 *db, const char *zName){
if( zName ){
Db *pDb;
for(i=(db->nDb-1), pDb=&db->aDb[i]; i>=0; i--, pDb--){
- if( 0==sqlite3StrICmp(pDb->zDbSName, zName) ) break;
+ if( 0==sqlite3_stricmp(pDb->zDbSName, zName) ) break;
+ /* "main" is always an acceptable alias for the primary database
+ ** even if it has been renamed using SQLITE_DBCONFIG_MAINDBNAME. */
+ if( i==0 && 0==sqlite3_stricmp("main", zName) ) break;
}
}
return i;
@@ -99211,7 +100097,7 @@ SQLITE_PRIVATE void sqlite3StartTable(
pTable->zName = zName;
pTable->iPKey = -1;
pTable->pSchema = db->aDb[iDb].pSchema;
- pTable->nRef = 1;
+ pTable->nTabRef = 1;
pTable->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
assert( pParse->pNewTable==0 );
pParse->pNewTable = pTable;
@@ -100277,7 +101163,7 @@ SQLITE_PRIVATE void sqlite3EndTable(
"UPDATE %Q.%s "
"SET type='%s', name=%Q, tbl_name=%Q, rootpage=#%d, sql=%Q "
"WHERE rowid=#%d",
- db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb),
+ db->aDb[iDb].zDbSName, MASTER_NAME,
zType,
p->zName,
p->zName,
@@ -100614,7 +101500,7 @@ static void destroyRootPage(Parse *pParse, int iTable, int iDb){
*/
sqlite3NestedParse(pParse,
"UPDATE %Q.%s SET rootpage=%d WHERE #%d AND rootpage=#%d",
- pParse->db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), iTable, r1, r1);
+ pParse->db->aDb[iDb].zDbSName, MASTER_NAME, iTable, r1, r1);
#endif
sqlite3ReleaseTempReg(pParse, r1);
}
@@ -100757,7 +101643,7 @@ SQLITE_PRIVATE void sqlite3CodeDropTable(Parse *pParse, Table *pTab, int iDb, in
*/
sqlite3NestedParse(pParse,
"DELETE FROM %Q.%s WHERE tbl_name=%Q and type!='trigger'",
- pDb->zDbSName, SCHEMA_TABLE(iDb), pTab->zName);
+ pDb->zDbSName, MASTER_NAME, pTab->zName);
if( !isView && !IsVirtual(pTab) ){
destroyTable(pParse, pTab);
}
@@ -101104,7 +101990,7 @@ static void sqlite3RefillIndex(Parse *pParse, Index *pIndex, int memRootPage){
}
sqlite3VdbeAddOp3(v, OP_SorterData, iSorter, regRecord, iIdx);
sqlite3VdbeAddOp3(v, OP_Last, iIdx, 0, -1);
- sqlite3VdbeAddOp3(v, OP_IdxInsert, iIdx, regRecord, 0);
+ sqlite3VdbeAddOp2(v, OP_IdxInsert, iIdx, regRecord);
sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
sqlite3ReleaseTempReg(pParse, regRecord);
sqlite3VdbeAddOp2(v, OP_SorterNext, iSorter, addr2); VdbeCoverage(v);
@@ -101649,7 +102535,7 @@ SQLITE_PRIVATE void sqlite3CreateIndex(
*/
sqlite3NestedParse(pParse,
"INSERT INTO %Q.%s VALUES('index',%Q,%Q,#%d,%Q);",
- db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb),
+ db->aDb[iDb].zDbSName, MASTER_NAME,
pIndex->zName,
pTab->zName,
iMem,
@@ -101801,7 +102687,7 @@ SQLITE_PRIVATE void sqlite3DropIndex(Parse *pParse, SrcList *pName, int ifExists
sqlite3BeginWriteOperation(pParse, 1, iDb);
sqlite3NestedParse(pParse,
"DELETE FROM %Q.%s WHERE name=%Q AND type='index'",
- db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), pIndex->zName
+ db->aDb[iDb].zDbSName, MASTER_NAME, pIndex->zName
);
sqlite3ClearStatTables(pParse, iDb, "idx", pIndex->zName);
sqlite3ChangeCookie(pParse, iDb);
@@ -101944,7 +102830,7 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListEnlarge(
/* Allocate additional space if needed */
if( (u32)pSrc->nSrc+nExtra>pSrc->nAlloc ){
SrcList *pNew;
- int nAlloc = pSrc->nSrc+nExtra;
+ int nAlloc = pSrc->nSrc*2+nExtra;
int nGot;
pNew = sqlite3DbRealloc(db, pSrc,
sizeof(*pSrc) + (nAlloc-1)*sizeof(pSrc->a[0]) );
@@ -102022,9 +102908,12 @@ SQLITE_PRIVATE SrcList *sqlite3SrcListAppend(
pList = sqlite3DbMallocRawNN(db, sizeof(SrcList) );
if( pList==0 ) return 0;
pList->nAlloc = 1;
- pList->nSrc = 0;
+ pList->nSrc = 1;
+ memset(&pList->a[0], 0, sizeof(pList->a[0]));
+ pList->a[0].iCursor = -1;
+ }else{
+ pList = sqlite3SrcListEnlarge(db, pList, 1, pList->nSrc);
}
- pList = sqlite3SrcListEnlarge(db, pList, 1, pList->nSrc);
if( db->mallocFailed ){
sqlite3SrcListDelete(db, pList);
return 0;
@@ -103239,7 +104128,7 @@ SQLITE_PRIVATE Table *sqlite3SrcListLookup(Parse *pParse, SrcList *pSrc){
sqlite3DeleteTable(pParse->db, pItem->pTab);
pItem->pTab = pTab;
if( pTab ){
- pTab->nRef++;
+ pTab->nTabRef++;
}
if( sqlite3IndexedByLookup(pParse, pItem) ){
pTab = 0;
@@ -103367,7 +104256,7 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere(
** );
*/
- pSelectRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0, 0);
+ pSelectRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0);
if( pSelectRowid == 0 ) goto limit_where_cleanup;
pEList = sqlite3ExprListAppend(pParse, 0, pSelectRowid);
if( pEList == 0 ) goto limit_where_cleanup;
@@ -103386,8 +104275,8 @@ SQLITE_PRIVATE Expr *sqlite3LimitWhere(
if( pSelect == 0 ) return 0;
/* now generate the new WHERE rowid IN clause for the DELETE/UDPATE */
- pWhereRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0, 0);
- pInClause = pWhereRowid ? sqlite3PExpr(pParse, TK_IN, pWhereRowid, 0, 0) : 0;
+ pWhereRowid = sqlite3PExpr(pParse, TK_ROW, 0, 0);
+ pInClause = pWhereRowid ? sqlite3PExpr(pParse, TK_IN, pWhereRowid, 0) : 0;
sqlite3PExprAddSelect(pParse, pInClause, pSelect);
return pInClause;
@@ -103652,7 +104541,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
nKey = 0; /* Zero tells OP_Found to use a composite key */
sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, iKey,
sqlite3IndexAffinityStr(pParse->db, pPk), nPk);
- sqlite3VdbeAddOp2(v, OP_IdxInsert, iEphCur, iKey);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iEphCur, iKey, iPk, nPk);
}else{
/* Add the rowid of the row to be deleted to the RowSet */
nKey = 1; /* OP_Seek always uses a single rowid */
@@ -103698,7 +104587,7 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
}
}else if( pPk ){
addrLoop = sqlite3VdbeAddOp1(v, OP_Rewind, iEphCur); VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_RowKey, iEphCur, iKey);
+ sqlite3VdbeAddOp2(v, OP_RowData, iEphCur, iKey);
assert( nKey==0 ); /* OP_Found will use a composite key */
}else{
addrLoop = sqlite3VdbeAddOp3(v, OP_RowSetRead, iRowSet, 0, iKey);
@@ -103722,12 +104611,8 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
#endif
{
int count = (pParse->nested==0); /* True to count changes */
- int iIdxNoSeek = -1;
- if( bComplex==0 && aiCurOnePass[1]!=iDataCur ){
- iIdxNoSeek = aiCurOnePass[1];
- }
sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur,
- iKey, nKey, count, OE_Default, eOnePass, iIdxNoSeek);
+ iKey, nKey, count, OE_Default, eOnePass, aiCurOnePass[1]);
}
/* End of the loop over all rowids/primary-keys. */
@@ -103741,14 +104626,6 @@ SQLITE_PRIVATE void sqlite3DeleteFrom(
sqlite3VdbeGoto(v, addrLoop);
sqlite3VdbeJumpHere(v, addrLoop);
}
-
- /* Close the cursors open on the table and its indexes. */
- if( !isView && !IsVirtual(pTab) ){
- if( !pPk ) sqlite3VdbeAddOp1(v, OP_Close, iDataCur);
- for(i=0, pIdx=pTab->pIndex; pIdx; i++, pIdx=pIdx->pNext){
- sqlite3VdbeAddOp1(v, OP_Close, iIdxCur + i);
- }
- }
} /* End non-truncate path */
/* Update the sqlite_sequence table by storing the content of the
@@ -103815,15 +104692,17 @@ delete_from_cleanup:
**
** If eMode is ONEPASS_MULTI, then this call is being made as part
** of a ONEPASS delete that affects multiple rows. In this case, if
-** iIdxNoSeek is a valid cursor number (>=0), then its position should
-** be preserved following the delete operation. Or, if iIdxNoSeek is not
-** a valid cursor number, the position of iDataCur should be preserved
-** instead.
+** iIdxNoSeek is a valid cursor number (>=0) and is not the same as
+** iDataCur, then its position should be preserved following the delete
+** operation. Or, if iIdxNoSeek is not a valid cursor number, the
+** position of iDataCur should be preserved instead.
**
** iIdxNoSeek:
-** If iIdxNoSeek is a valid cursor number (>=0), then it identifies an
-** index cursor (from within array of cursors starting at iIdxCur) that
-** already points to the index entry to be deleted.
+** If iIdxNoSeek is a valid cursor number (>=0) not equal to iDataCur,
+** then it identifies an index cursor (from within array of cursors
+** starting at iIdxCur) that already points to the index entry to be deleted.
+** Except, this optimization is disabled if there are BEFORE triggers since
+** the trigger body might have moved the cursor.
*/
SQLITE_PRIVATE void sqlite3GenerateRowDelete(
Parse *pParse, /* Parsing context */
@@ -103894,13 +104773,18 @@ SQLITE_PRIVATE void sqlite3GenerateRowDelete(
/* If any BEFORE triggers were coded, then seek the cursor to the
** row to be deleted again. It may be that the BEFORE triggers moved
- ** the cursor or of already deleted the row that the cursor was
+ ** the cursor or already deleted the row that the cursor was
** pointing to.
+ **
+ ** Also disable the iIdxNoSeek optimization since the BEFORE trigger
+ ** may have moved that cursor.
*/
if( addrStart<sqlite3VdbeCurrentAddr(v) ){
sqlite3VdbeAddOp4Int(v, opSeek, iDataCur, iLabel, iPk, nPk);
VdbeCoverageIf(v, opSeek==OP_NotExists);
VdbeCoverageIf(v, opSeek==OP_NotFound);
+ testcase( iIdxNoSeek>=0 );
+ iIdxNoSeek = -1;
}
/* Do FK processing. This call checks that any FK constraints that
@@ -103923,11 +104807,13 @@ SQLITE_PRIVATE void sqlite3GenerateRowDelete(
u8 p5 = 0;
sqlite3GenerateRowIndexDelete(pParse, pTab, iDataCur, iIdxCur,0,iIdxNoSeek);
sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, (count?OPFLAG_NCHANGE:0));
- sqlite3VdbeChangeP4(v, -1, (char*)pTab, P4_TABLE);
+ if( pParse->nested==0 ){
+ sqlite3VdbeAppendP4(v, (char*)pTab, P4_TABLE);
+ }
if( eMode!=ONEPASS_OFF ){
sqlite3VdbeChangeP5(v, OPFLAG_AUXDELETE);
}
- if( iIdxNoSeek>=0 ){
+ if( iIdxNoSeek>=0 && iIdxNoSeek!=iDataCur ){
sqlite3VdbeAddOp1(v, OP_Delete, iIdxNoSeek);
}
if( eMode==ONEPASS_MULTI ) p5 |= OPFLAG_SAVEPOSITION;
@@ -104081,6 +104967,10 @@ SQLITE_PRIVATE int sqlite3GenerateIndexKey(
}
if( regOut ){
sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase, nCol, regOut);
+ if( pIdx->pTable->pSelect ){
+ const char *zAff = sqlite3IndexAffinityStr(pParse->db, pIdx);
+ sqlite3VdbeChangeP4(v, -1, zAff, P4_TRANSIENT);
+ }
}
sqlite3ReleaseTempRange(pParse, regBase, nCol);
return regBase;
@@ -104302,23 +105192,28 @@ static void instrFunc(
if( typeHaystack==SQLITE_NULL || typeNeedle==SQLITE_NULL ) return;
nHaystack = sqlite3_value_bytes(argv[0]);
nNeedle = sqlite3_value_bytes(argv[1]);
- if( typeHaystack==SQLITE_BLOB && typeNeedle==SQLITE_BLOB ){
- zHaystack = sqlite3_value_blob(argv[0]);
- zNeedle = sqlite3_value_blob(argv[1]);
- isText = 0;
- }else{
- zHaystack = sqlite3_value_text(argv[0]);
- zNeedle = sqlite3_value_text(argv[1]);
- isText = 1;
- }
- while( nNeedle<=nHaystack && memcmp(zHaystack, zNeedle, nNeedle)!=0 ){
- N++;
- do{
- nHaystack--;
- zHaystack++;
- }while( isText && (zHaystack[0]&0xc0)==0x80 );
+ if( nNeedle>0 ){
+ if( typeHaystack==SQLITE_BLOB && typeNeedle==SQLITE_BLOB ){
+ zHaystack = sqlite3_value_blob(argv[0]);
+ zNeedle = sqlite3_value_blob(argv[1]);
+ assert( zNeedle!=0 );
+ assert( zHaystack!=0 || nHaystack==0 );
+ isText = 0;
+ }else{
+ zHaystack = sqlite3_value_text(argv[0]);
+ zNeedle = sqlite3_value_text(argv[1]);
+ isText = 1;
+ if( zHaystack==0 || zNeedle==0 ) return;
+ }
+ while( nNeedle<=nHaystack && memcmp(zHaystack, zNeedle, nNeedle)!=0 ){
+ N++;
+ do{
+ nHaystack--;
+ zHaystack++;
+ }while( isText && (zHaystack[0]&0xc0)==0x80 );
+ }
+ if( nNeedle>nHaystack ) N = 0;
}
- if( nNeedle>nHaystack ) N = 0;
sqlite3_result_int(context, N);
}
@@ -104698,9 +105593,19 @@ static const struct compareInfo likeInfoNorm = { '%', '_', 0, 1 };
static const struct compareInfo likeInfoAlt = { '%', '_', 0, 0 };
/*
-** Compare two UTF-8 strings for equality where the first string can
-** potentially be a "glob" or "like" expression. Return true (1) if they
-** are the same and false (0) if they are different.
+** Possible error returns from patternMatch()
+*/
+#define SQLITE_MATCH 0
+#define SQLITE_NOMATCH 1
+#define SQLITE_NOWILDCARDMATCH 2
+
+/*
+** Compare two UTF-8 strings for equality where the first string is
+** a GLOB or LIKE expression. Return values:
+**
+** SQLITE_MATCH: Match
+** SQLITE_NOMATCH: No match
+** SQLITE_NOWILDCARDMATCH: No match in spite of having * or % wildcards.
**
** Globbing rules:
**
@@ -104751,30 +105656,31 @@ static int patternCompare(
** single character of the input string for each "?" skipped */
while( (c=Utf8Read(zPattern)) == matchAll || c == matchOne ){
if( c==matchOne && sqlite3Utf8Read(&zString)==0 ){
- return 0;
+ return SQLITE_NOWILDCARDMATCH;
}
}
if( c==0 ){
- return 1; /* "*" at the end of the pattern matches */
+ return SQLITE_MATCH; /* "*" at the end of the pattern matches */
}else if( c==matchOther ){
if( pInfo->matchSet==0 ){
c = sqlite3Utf8Read(&zPattern);
- if( c==0 ) return 0;
+ if( c==0 ) return SQLITE_NOWILDCARDMATCH;
}else{
/* "[...]" immediately follows the "*". We have to do a slow
** recursive search in this case, but it is an unusual case. */
assert( matchOther<0x80 ); /* '[' is a single-byte character */
- while( *zString
- && patternCompare(&zPattern[-1],zString,pInfo,matchOther)==0 ){
+ while( *zString ){
+ int bMatch = patternCompare(&zPattern[-1],zString,pInfo,matchOther);
+ if( bMatch!=SQLITE_NOMATCH ) return bMatch;
SQLITE_SKIP_UTF8(zString);
}
- return *zString!=0;
+ return SQLITE_NOWILDCARDMATCH;
}
}
/* At this point variable c contains the first character of the
** pattern string past the "*". Search in the input string for the
- ** first matching character and recursively contine the match from
+ ** first matching character and recursively continue the match from
** that point.
**
** For a case-insensitive search, set variable cx to be the same as
@@ -104783,6 +105689,7 @@ static int patternCompare(
*/
if( c<=0x80 ){
u32 cx;
+ int bMatch;
if( noCase ){
cx = sqlite3Toupper(c);
c = sqlite3Tolower(c);
@@ -104791,27 +105698,30 @@ static int patternCompare(
}
while( (c2 = *(zString++))!=0 ){
if( c2!=c && c2!=cx ) continue;
- if( patternCompare(zPattern,zString,pInfo,matchOther) ) return 1;
+ bMatch = patternCompare(zPattern,zString,pInfo,matchOther);
+ if( bMatch!=SQLITE_NOMATCH ) return bMatch;
}
}else{
+ int bMatch;
while( (c2 = Utf8Read(zString))!=0 ){
if( c2!=c ) continue;
- if( patternCompare(zPattern,zString,pInfo,matchOther) ) return 1;
+ bMatch = patternCompare(zPattern,zString,pInfo,matchOther);
+ if( bMatch!=SQLITE_NOMATCH ) return bMatch;
}
}
- return 0;
+ return SQLITE_NOWILDCARDMATCH;
}
if( c==matchOther ){
if( pInfo->matchSet==0 ){
c = sqlite3Utf8Read(&zPattern);
- if( c==0 ) return 0;
+ if( c==0 ) return SQLITE_NOMATCH;
zEscaped = zPattern;
}else{
u32 prior_c = 0;
int seen = 0;
int invert = 0;
c = sqlite3Utf8Read(&zString);
- if( c==0 ) return 0;
+ if( c==0 ) return SQLITE_NOMATCH;
c2 = sqlite3Utf8Read(&zPattern);
if( c2=='^' ){
invert = 1;
@@ -104835,7 +105745,7 @@ static int patternCompare(
c2 = sqlite3Utf8Read(&zPattern);
}
if( c2==0 || (seen ^ invert)==0 ){
- return 0;
+ return SQLITE_NOMATCH;
}
continue;
}
@@ -104846,23 +105756,25 @@ static int patternCompare(
continue;
}
if( c==matchOne && zPattern!=zEscaped && c2!=0 ) continue;
- return 0;
+ return SQLITE_NOMATCH;
}
- return *zString==0;
+ return *zString==0 ? SQLITE_MATCH : SQLITE_NOMATCH;
}
/*
-** The sqlite3_strglob() interface.
+** The sqlite3_strglob() interface. Return 0 on a match (like strcmp()) and
+** non-zero if there is no match.
*/
SQLITE_API int sqlite3_strglob(const char *zGlobPattern, const char *zString){
- return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, '[')==0;
+ return patternCompare((u8*)zGlobPattern, (u8*)zString, &globInfo, '[');
}
/*
-** The sqlite3_strlike() interface.
+** The sqlite3_strlike() interface. Return 0 on a match and non-zero for
+** a miss - like strcmp().
*/
SQLITE_API int sqlite3_strlike(const char *zPattern, const char *zStr, unsigned int esc){
- return patternCompare((u8*)zPattern, (u8*)zStr, &likeInfoNorm, esc)==0;
+ return patternCompare((u8*)zPattern, (u8*)zStr, &likeInfoNorm, esc);
}
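After this change both interfaces follow the strcmp() convention (zero means match); a short sketch with illustrative patterns:

#include <stdio.h>
#include "sqlite3.h"

int main(void){
  printf("%d\n", sqlite3_strglob("*.c", "main.c"));    /* 0: match */
  printf("%d\n", sqlite3_strglob("*.c", "main.h"));    /* non-zero: no match */
  printf("%d\n", sqlite3_strlike("%y_z", "xyyz", 0));  /* 0: match */
  printf("%d\n", sqlite3_strlike("abc", "ABC", 0));    /* 0: LIKE is ASCII case-insensitive */
  return 0;
}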
/*
@@ -104943,7 +105855,7 @@ static void likeFunc(
#ifdef SQLITE_TEST
sqlite3_like_count++;
#endif
- sqlite3_result_int(context, patternCompare(zB, zA, pInfo, escape));
+ sqlite3_result_int(context, patternCompare(zB, zA, pInfo, escape)==SQLITE_MATCH);
}
}
@@ -105714,7 +106626,7 @@ static void groupConcatStep(
zSep = ",";
nSep = 1;
}
- if( nSep ) sqlite3StrAccumAppend(pAccum, zSep, nSep);
+ if( zSep ) sqlite3StrAccumAppend(pAccum, zSep, nSep);
}
zVal = (char*)sqlite3_value_text(argv[0]);
nVal = sqlite3_value_bytes(argv[0]);
@@ -105855,6 +106767,9 @@ SQLITE_PRIVATE void sqlite3RegisterBuiltinFunctions(void){
FUNCTION2(unlikely, 1, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY),
FUNCTION2(likelihood, 2, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY),
FUNCTION2(likely, 1, 0, 0, noopFunc, SQLITE_FUNC_UNLIKELY),
+#ifdef SQLITE_DEBUG
+ FUNCTION2(affinity, 1, 0, 0, noopFunc, SQLITE_FUNC_AFFINITY),
+#endif
FUNCTION(ltrim, 1, 1, 0, trimFunc ),
FUNCTION(ltrim, 2, 1, 0, trimFunc ),
FUNCTION(rtrim, 1, 2, 0, trimFunc ),
@@ -106177,7 +107092,7 @@ SQLITE_PRIVATE int sqlite3FkLocateIndex(
}
for(pIdx=pParent->pIndex; pIdx; pIdx=pIdx->pNext){
- if( pIdx->nKeyCol==nCol && IsUniqueIndex(pIdx) ){
+ if( pIdx->nKeyCol==nCol && IsUniqueIndex(pIdx) && pIdx->pPartIdxWhere==0 ){
/* pIdx is a UNIQUE index (or a PRIMARY KEY) and has the right number
** of columns. If each indexed column corresponds to a foreign key
** column of pFKey, then this index is a winner. */
@@ -106536,7 +107451,7 @@ static void fkScanChildren(
assert( iCol>=0 );
zCol = pFKey->pFrom->aCol[iCol].zName;
pRight = sqlite3Expr(db, TK_ID, zCol);
- pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight, 0);
+ pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight);
pWhere = sqlite3ExprAnd(db, pWhere, pEq);
}
@@ -106558,7 +107473,7 @@ static void fkScanChildren(
if( HasRowid(pTab) ){
pLeft = exprTableRegister(pParse, pTab, regData, -1);
pRight = exprTableColumn(db, pTab, pSrc->a[0].iCursor, -1);
- pNe = sqlite3PExpr(pParse, TK_NE, pLeft, pRight, 0);
+ pNe = sqlite3PExpr(pParse, TK_NE, pLeft, pRight);
}else{
Expr *pEq, *pAll = 0;
Index *pPk = sqlite3PrimaryKeyIndex(pTab);
@@ -106568,10 +107483,10 @@ static void fkScanChildren(
assert( iCol>=0 );
pLeft = exprTableRegister(pParse, pTab, regData, iCol);
pRight = exprTableColumn(db, pTab, pSrc->a[0].iCursor, iCol);
- pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight, 0);
+ pEq = sqlite3PExpr(pParse, TK_EQ, pLeft, pRight);
pAll = sqlite3ExprAnd(db, pAll, pEq);
}
- pNe = sqlite3PExpr(pParse, TK_NOT, pAll, 0, 0);
+ pNe = sqlite3PExpr(pParse, TK_NOT, pAll, 0);
}
pWhere = sqlite3ExprAnd(db, pWhere, pNe);
}
@@ -106959,7 +107874,7 @@ SQLITE_PRIVATE void sqlite3FkCheck(
struct SrcList_item *pItem = pSrc->a;
pItem->pTab = pFKey->pFrom;
pItem->zName = pFKey->pFrom->zName;
- pItem->pTab->nRef++;
+ pItem->pTab->nTabRef++;
pItem->iCursor = pParse->nTab++;
if( regNew!=0 ){
@@ -107157,10 +108072,9 @@ static Trigger *fkActionTrigger(
pEq = sqlite3PExpr(pParse, TK_EQ,
sqlite3PExpr(pParse, TK_DOT,
sqlite3ExprAlloc(db, TK_ID, &tOld, 0),
- sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)
- , 0),
+ sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)),
sqlite3ExprAlloc(db, TK_ID, &tFromCol, 0)
- , 0);
+ );
pWhere = sqlite3ExprAnd(db, pWhere, pEq);
/* For ON UPDATE, construct the next term of the WHEN clause.
@@ -107172,13 +108086,11 @@ static Trigger *fkActionTrigger(
pEq = sqlite3PExpr(pParse, TK_IS,
sqlite3PExpr(pParse, TK_DOT,
sqlite3ExprAlloc(db, TK_ID, &tOld, 0),
- sqlite3ExprAlloc(db, TK_ID, &tToCol, 0),
- 0),
+ sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)),
sqlite3PExpr(pParse, TK_DOT,
sqlite3ExprAlloc(db, TK_ID, &tNew, 0),
- sqlite3ExprAlloc(db, TK_ID, &tToCol, 0),
- 0),
- 0);
+ sqlite3ExprAlloc(db, TK_ID, &tToCol, 0))
+ );
pWhen = sqlite3ExprAnd(db, pWhen, pEq);
}
@@ -107187,8 +108099,7 @@ static Trigger *fkActionTrigger(
if( action==OE_Cascade ){
pNew = sqlite3PExpr(pParse, TK_DOT,
sqlite3ExprAlloc(db, TK_ID, &tNew, 0),
- sqlite3ExprAlloc(db, TK_ID, &tToCol, 0)
- , 0);
+ sqlite3ExprAlloc(db, TK_ID, &tToCol, 0));
}else if( action==OE_SetDflt ){
Expr *pDflt = pFKey->pFrom->aCol[iFromCol].pDflt;
if( pDflt ){
@@ -107244,7 +108155,7 @@ static Trigger *fkActionTrigger(
pStep->pExprList = sqlite3ExprListDup(db, pList, EXPRDUP_REDUCE);
pStep->pSelect = sqlite3SelectDup(db, pSelect, EXPRDUP_REDUCE);
if( pWhen ){
- pWhen = sqlite3PExpr(pParse, TK_NOT, pWhen, 0, 0);
+ pWhen = sqlite3PExpr(pParse, TK_NOT, pWhen, 0);
pTrigger->pWhen = sqlite3ExprDup(db, pWhen, EXPRDUP_REDUCE);
}
}
@@ -107848,7 +108759,7 @@ SQLITE_PRIVATE void sqlite3Insert(
sqlite3 *db; /* The main database structure */
Table *pTab; /* The table to insert into. aka TABLE */
char *zTab; /* Name of the table into which we are inserting */
- int i, j, idx; /* Loop counters */
+ int i, j; /* Loop counters */
Vdbe *v; /* Generate code into this virtual machine */
Index *pIdx; /* For looping over indices of the table */
int nColumn; /* Number of columns in the data */
@@ -108155,8 +109066,10 @@ SQLITE_PRIVATE void sqlite3Insert(
if( aRegIdx==0 ){
goto insert_cleanup;
}
- for(i=0; i<nIdx; i++){
+ for(i=0, pIdx=pTab->pIndex; i<nIdx; pIdx=pIdx->pNext, i++){
+ assert( pIdx );
aRegIdx[i] = ++pParse->nMem;
+ pParse->nMem += pIdx->nColumn;
}
}
@@ -108358,12 +109271,26 @@ SQLITE_PRIVATE void sqlite3Insert(
#endif
{
int isReplace; /* Set to true if constraints may cause a replace */
+ int bUseSeek; /* True to use OPFLAG_SEEKRESULT */
sqlite3GenerateConstraintChecks(pParse, pTab, aRegIdx, iDataCur, iIdxCur,
regIns, 0, ipkColumn>=0, onError, endOfLoop, &isReplace, 0
);
sqlite3FkCheck(pParse, pTab, 0, regIns, 0, 0);
+
+ /* Set the OPFLAG_USESEEKRESULT flag if either (a) there are no REPLACE
+ ** constraints or (b) there are no triggers and this table is not a
+ ** parent table in a foreign key constraint. It is safe to set the
+ ** flag in the second case because, if any REPLACE constraint is hit, an
+ ** OP_Delete or OP_IdxDelete instruction will be executed on each
+ ** cursor that is disturbed. And these instructions both clear the
+ ** VdbeCursor.seekResult variable, disabling the OPFLAG_USESEEKRESULT
+ ** functionality. */
+ bUseSeek = (isReplace==0 || (pTrigger==0 &&
+ ((db->flags & SQLITE_ForeignKeys)==0 || sqlite3FkReferences(pTab)==0)
+ ));
sqlite3CompleteInsertion(pParse, pTab, iDataCur, iIdxCur,
- regIns, aRegIdx, 0, appendFlag, isReplace==0);
+ regIns, aRegIdx, 0, appendFlag, bUseSeek
+ );
}
}
@@ -108392,14 +109319,6 @@ SQLITE_PRIVATE void sqlite3Insert(
sqlite3VdbeJumpHere(v, addrInsTop);
}
- if( !IsVirtual(pTab) && !isView ){
- /* Close all tables opened */
- if( iDataCur<iIdxCur ) sqlite3VdbeAddOp1(v, OP_Close, iDataCur);
- for(idx=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, idx++){
- sqlite3VdbeAddOp1(v, OP_Close, idx+iIdxCur);
- }
- }
-
insert_end:
/* Update the sqlite_sequence table by storing the content of the
** maximum rowid counter values recorded while inserting into
@@ -108606,7 +109525,6 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
int ipkBottom = 0; /* Bottom of the rowid change constraint check */
u8 isUpdate; /* True if this is an UPDATE operation */
u8 bAffinityDone = 0; /* True if the OP_Affinity operation has been run */
- int regRowid = -1; /* Register holding ROWID value */
isUpdate = regOldData!=0;
db = pParse->db;
@@ -108661,8 +109579,9 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
case OE_Fail: {
char *zMsg = sqlite3MPrintf(db, "%s.%s", pTab->zName,
pTab->aCol[i].zName);
- sqlite3VdbeAddOp4(v, OP_HaltIfNull, SQLITE_CONSTRAINT_NOTNULL, onError,
- regNewData+1+i, zMsg, P4_DYNAMIC);
+ sqlite3VdbeAddOp3(v, OP_HaltIfNull, SQLITE_CONSTRAINT_NOTNULL, onError,
+ regNewData+1+i);
+ sqlite3VdbeAppendP4(v, zMsg, P4_DYNAMIC);
sqlite3VdbeChangeP5(v, P5_ConstraintNotNull);
VdbeCoverage(v);
break;
@@ -108726,7 +109645,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
}
if( isUpdate ){
- /* pkChng!=0 does not mean that the rowid has change, only that
+ /* pkChng!=0 does not mean that the rowid has changed, only that
** it might have changed. Skip the conflict logic below if the rowid
** is unchanged. */
sqlite3VdbeAddOp3(v, OP_Eq, regNewData, addrRowidOk, regOldData);
@@ -108804,7 +109723,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
** OP_Insert replace the existing entry than it is to delete the
** existing entry and then insert a new one. */
sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, OPFLAG_ISNOOP);
- sqlite3VdbeChangeP4(v, -1, (char *)pTab, P4_TABLE);
+ sqlite3VdbeAppendP4(v, pTab, P4_TABLE);
}
#endif /* SQLITE_ENABLE_PREUPDATE_HOOK */
if( pTab->pIndex ){
@@ -108861,7 +109780,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
/* Create a record for this index entry as it should appear after
** the insert or update. Store that record in the aRegIdx[ix] register
*/
- regIdx = sqlite3GetTempRange(pParse, pIdx->nColumn);
+ regIdx = aRegIdx[ix]+1;
for(i=0; i<pIdx->nColumn; i++){
int iField = pIdx->aiColumn[i];
int x;
@@ -108872,9 +109791,7 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
VdbeComment((v, "%s column %d", pIdx->zName, i));
}else{
if( iField==XN_ROWID || iField==pTab->iPKey ){
- if( regRowid==regIdx+i ) continue; /* ROWID already in regIdx+i */
x = regNewData;
- regRowid = pIdx->pPartIdxWhere ? -1 : regIdx+i;
}else{
x = iField + regNewData + 1;
}
@@ -108884,7 +109801,6 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
}
sqlite3VdbeAddOp3(v, OP_MakeRecord, regIdx, pIdx->nColumn, aRegIdx[ix]);
VdbeComment((v, "for %s", pIdx->zName));
- sqlite3ExprCacheAffinityChange(pParse, regIdx, pIdx->nColumn);
/* In an UPDATE operation, if this index is the PRIMARY KEY index
** of a WITHOUT ROWID table and there has been no change to the
@@ -108898,7 +109814,6 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
/* Find out what action to take in case there is a uniqueness conflict */
onError = pIdx->onError;
if( onError==OE_None ){
- sqlite3ReleaseTempRange(pParse, regIdx, pIdx->nColumn);
sqlite3VdbeResolveLabel(v, addrUniqueOk);
continue; /* pIdx is not a UNIQUE index */
}
@@ -108907,7 +109822,26 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
}else if( onError==OE_Default ){
onError = OE_Abort;
}
-
+
+ /* Collision detection may be omitted if all of the following are true:
+ ** (1) The conflict resolution algorithm is REPLACE
+ ** (2) The table is a WITHOUT ROWID table
+ ** (3) There are no secondary indexes on the table
+ ** (4) No delete triggers need to be fired if there is a conflict
+ ** (5) No FK constraint counters need to be updated if a conflict occurs.
+ */
+ if( (ix==0 && pIdx->pNext==0) /* Condition 3 */
+ && pPk==pIdx /* Condition 2 */
+ && onError==OE_Replace /* Condition 1 */
+ && ( 0==(db->flags&SQLITE_RecTriggers) || /* Condition 4 */
+ 0==sqlite3TriggersExist(pParse, pTab, TK_DELETE, 0, 0))
+ && ( 0==(db->flags&SQLITE_ForeignKeys) || /* Condition 5 */
+ (0==pTab->pFKey && 0==sqlite3FkReferences(pTab)))
+ ){
+ sqlite3VdbeResolveLabel(v, addrUniqueOk);
+ continue;
+ }
+
/* Check to see if the new index entry will be unique */
sqlite3VdbeAddOp4Int(v, OP_NoConflict, iThisCur, addrUniqueOk,
regIdx, pIdx->nKeyCol); VdbeCoverage(v);
@@ -108991,13 +109925,12 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
}
sqlite3GenerateRowDelete(pParse, pTab, pTrigger, iDataCur, iIdxCur,
regR, nPkField, 0, OE_Replace,
- (pIdx==pPk ? ONEPASS_SINGLE : ONEPASS_OFF), -1);
+ (pIdx==pPk ? ONEPASS_SINGLE : ONEPASS_OFF), iThisCur);
seenReplace = 1;
break;
}
}
sqlite3VdbeResolveLabel(v, addrUniqueOk);
- sqlite3ReleaseTempRange(pParse, regIdx, pIdx->nColumn);
if( regR!=regIdx ) sqlite3ReleaseTempRange(pParse, regR, nPkField);
}
if( ipkTop ){
@@ -109009,6 +109942,25 @@ SQLITE_PRIVATE void sqlite3GenerateConstraintChecks(
VdbeModuleComment((v, "END: GenCnstCks(%d)", seenReplace));
}
+#ifdef SQLITE_ENABLE_NULL_TRIM
+/*
+** Change the P5 operand on the last opcode (which should be an OP_MakeRecord)
+** to be the number of columns in table pTab that must not be NULL-trimmed.
+**
+** Or if no columns of pTab may be NULL-trimmed, leave P5 at zero.
+*/
+SQLITE_PRIVATE void sqlite3SetMakeRecordP5(Vdbe *v, Table *pTab){
+ u16 i;
+
+ /* Records with omitted columns are only allowed for schema format
+ ** version 2 and later (SQLite version 3.1.4, 2005-02-20). */
+ if( pTab->pSchema->file_format<2 ) return;
+
+ for(i=pTab->nCol; i>1 && pTab->aCol[i-1].pDflt==0; i--){}
+ sqlite3VdbeChangeP5(v, i);
+}
+#endif
+
/*
** This routine generates code to finish the INSERT or UPDATE operation
** that was started by a prior call to sqlite3GenerateConstraintChecks.
@@ -109025,7 +109977,7 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion(
int iIdxCur, /* First index cursor */
int regNewData, /* Range of content */
int *aRegIdx, /* Register used by each index. 0 for unused indices */
- int isUpdate, /* True for UPDATE, False for INSERT */
+ int update_flags, /* True for UPDATE, False for INSERT */
int appendBias, /* True if this is likely to be an append */
int useSeekResult /* True to set the USESEEKRESULT flag on OP_[Idx]Insert */
){
@@ -109037,6 +109989,11 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion(
int i; /* Loop counter */
u8 bAffinityDone = 0; /* True if OP_Affinity has been run already */
+ assert( update_flags==0
+ || update_flags==OPFLAG_ISUPDATE
+ || update_flags==(OPFLAG_ISUPDATE|OPFLAG_SAVEPOSITION)
+ );
+
v = sqlite3GetVdbe(pParse);
assert( v!=0 );
assert( pTab->pSelect==0 ); /* This table is not a VIEW */
@@ -109047,26 +110004,39 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion(
sqlite3VdbeAddOp2(v, OP_IsNull, aRegIdx[i], sqlite3VdbeCurrentAddr(v)+2);
VdbeCoverage(v);
}
- sqlite3VdbeAddOp2(v, OP_IdxInsert, iIdxCur+i, aRegIdx[i]);
- pik_flags = 0;
- if( useSeekResult ) pik_flags = OPFLAG_USESEEKRESULT;
+ pik_flags = (useSeekResult ? OPFLAG_USESEEKRESULT : 0);
if( IsPrimaryKeyIndex(pIdx) && !HasRowid(pTab) ){
assert( pParse->nested==0 );
pik_flags |= OPFLAG_NCHANGE;
+ pik_flags |= (update_flags & OPFLAG_SAVEPOSITION);
+#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
+ if( update_flags==0 ){
+ sqlite3VdbeAddOp4(v, OP_InsertInt,
+ iIdxCur+i, aRegIdx[i], 0, (char*)pTab, P4_TABLE
+ );
+ sqlite3VdbeChangeP5(v, OPFLAG_ISNOOP);
+ }
+#endif
}
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iIdxCur+i, aRegIdx[i],
+ aRegIdx[i]+1,
+ pIdx->uniqNotNull ? pIdx->nKeyCol: pIdx->nColumn);
sqlite3VdbeChangeP5(v, pik_flags);
}
if( !HasRowid(pTab) ) return;
regData = regNewData + 1;
regRec = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp3(v, OP_MakeRecord, regData, pTab->nCol, regRec);
- if( !bAffinityDone ) sqlite3TableAffinity(v, pTab, 0);
- sqlite3ExprCacheAffinityChange(pParse, regData, pTab->nCol);
+ sqlite3SetMakeRecordP5(v, pTab);
+ if( !bAffinityDone ){
+ sqlite3TableAffinity(v, pTab, 0);
+ sqlite3ExprCacheAffinityChange(pParse, regData, pTab->nCol);
+ }
if( pParse->nested ){
pik_flags = 0;
}else{
pik_flags = OPFLAG_NCHANGE;
- pik_flags |= (isUpdate?OPFLAG_ISUPDATE:OPFLAG_LASTROWID);
+ pik_flags |= (update_flags?update_flags:OPFLAG_LASTROWID);
}
if( appendBias ){
pik_flags |= OPFLAG_APPEND;
@@ -109076,7 +110046,7 @@ SQLITE_PRIVATE void sqlite3CompleteInsertion(
}
sqlite3VdbeAddOp3(v, OP_Insert, iDataCur, regRec, regNewData);
if( !pParse->nested ){
- sqlite3VdbeChangeP4(v, -1, (char *)pTab, P4_TABLE);
+ sqlite3VdbeAppendP4(v, pTab, P4_TABLE);
}
sqlite3VdbeChangeP5(v, pik_flags);
}
@@ -109459,6 +110429,7 @@ static int xferOptimization(
sqlite3VdbeJumpHere(v, addr1);
}
if( HasRowid(pSrc) ){
+ u8 insFlags;
sqlite3OpenTable(pParse, iSrc, iDbSrc, pSrc, OP_OpenRead);
emptySrcTest = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v);
if( pDest->iPKey>=0 ){
@@ -109474,10 +110445,17 @@ static int xferOptimization(
addr1 = sqlite3VdbeAddOp2(v, OP_Rowid, iSrc, regRowid);
assert( (pDest->tabFlags & TF_Autoincrement)==0 );
}
- sqlite3VdbeAddOp2(v, OP_RowData, iSrc, regData);
+ sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1);
+ if( db->flags & SQLITE_Vacuum ){
+ sqlite3VdbeAddOp3(v, OP_Last, iDest, 0, -1);
+ insFlags = OPFLAG_NCHANGE|OPFLAG_LASTROWID|
+ OPFLAG_APPEND|OPFLAG_USESEEKRESULT;
+ }else{
+ insFlags = OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND;
+ }
sqlite3VdbeAddOp4(v, OP_Insert, iDest, regData, regRowid,
(char*)pDest, P4_TABLE);
- sqlite3VdbeChangeP5(v, OPFLAG_NCHANGE|OPFLAG_LASTROWID|OPFLAG_APPEND);
+ sqlite3VdbeChangeP5(v, insFlags);
sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1); VdbeCoverage(v);
sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0);
sqlite3VdbeAddOp2(v, OP_Close, iDest, 0);
@@ -109499,7 +110477,7 @@ static int xferOptimization(
sqlite3VdbeChangeP5(v, OPFLAG_BULKCSR);
VdbeComment((v, "%s", pDestIdx->zName));
addr1 = sqlite3VdbeAddOp2(v, OP_Rewind, iSrc, 0); VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_RowKey, iSrc, regData);
+ sqlite3VdbeAddOp3(v, OP_RowData, iSrc, regData, 1);
if( db->flags & SQLITE_Vacuum ){
/* This INSERT command is part of a VACUUM operation, which guarantees
** that the destination table is empty. If all indexed columns use
@@ -109529,8 +110507,8 @@ static int xferOptimization(
if( !HasRowid(pSrc) && pDestIdx->idxType==2 ){
idxInsFlags |= OPFLAG_NCHANGE;
}
- sqlite3VdbeAddOp3(v, OP_IdxInsert, iDest, regData, 1);
- sqlite3VdbeChangeP5(v, idxInsFlags);
+ sqlite3VdbeAddOp2(v, OP_IdxInsert, iDest, regData);
+ sqlite3VdbeChangeP5(v, idxInsFlags|OPFLAG_APPEND);
sqlite3VdbeAddOp2(v, OP_Next, iSrc, addr1+1); VdbeCoverage(v);
sqlite3VdbeJumpHere(v, addr1);
sqlite3VdbeAddOp2(v, OP_Close, iSrc, 0);
@@ -110284,7 +111262,6 @@ typedef int (*sqlite3_loadext_entry)(
/************** End of sqlite3ext.h ******************************************/
/************** Continuing where we left off in loadext.c ********************/
/* #include "sqliteInt.h" */
-/* #include <string.h> */
#ifndef SQLITE_OMIT_LOAD_EXTENSION
/*
@@ -111093,6 +112070,8 @@ SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){
** ../tool/mkpragmatab.tcl. To update the set of pragmas, edit
** that script and rerun it.
*/
+
+/* The various pragma types */
#define PragTyp_HEADER_VALUE 0
#define PragTyp_AUTO_VACUUM 1
#define PragTyp_FLAG 2
@@ -111136,419 +112115,560 @@ SQLITE_PRIVATE void sqlite3AutoLoadExtensions(sqlite3 *db){
#define PragTyp_REKEY 40
#define PragTyp_LOCK_STATUS 41
#define PragTyp_PARSER_TRACE 42
-#define PragFlag_NeedSchema 0x01
-#define PragFlag_ReadOnly 0x02
-static const struct sPragmaNames {
- const char *const zName; /* Name of pragma */
- u8 ePragTyp; /* PragTyp_XXX value */
- u8 mPragFlag; /* Zero or more PragFlag_XXX values */
- u32 iArg; /* Extra argument */
-} aPragmaNames[] = {
+
+/* Property flags associated with various pragmas. */
+#define PragFlg_NeedSchema 0x01 /* Force schema load before running */
+#define PragFlg_NoColumns 0x02 /* OP_ResultRow called with zero columns */
+#define PragFlg_NoColumns1 0x04 /* zero columns if RHS argument is present */
+#define PragFlg_ReadOnly 0x08 /* Read-only HEADER_VALUE */
+#define PragFlg_Result0 0x10 /* Acts as query when no argument */
+#define PragFlg_Result1 0x20 /* Acts as query when has one argument */
+#define PragFlg_SchemaOpt 0x40 /* Schema restricts name search if present */
+#define PragFlg_SchemaReq 0x80 /* Schema required - "main" is default */
+
+/* Names of columns for pragmas that return a multi-column result
+** or that return single-column results where the name of the
+** result column is different from the name of the pragma
+*/
+static const char *const pragCName[] = {
+ /* 0 */ "cache_size", /* Used by: default_cache_size */
+ /* 1 */ "cid", /* Used by: table_info */
+ /* 2 */ "name",
+ /* 3 */ "type",
+ /* 4 */ "notnull",
+ /* 5 */ "dflt_value",
+ /* 6 */ "pk",
+ /* 7 */ "table", /* Used by: stats */
+ /* 8 */ "index",
+ /* 9 */ "width",
+ /* 10 */ "height",
+ /* 11 */ "seqno", /* Used by: index_info */
+ /* 12 */ "cid",
+ /* 13 */ "name",
+ /* 14 */ "seqno", /* Used by: index_xinfo */
+ /* 15 */ "cid",
+ /* 16 */ "name",
+ /* 17 */ "desc",
+ /* 18 */ "coll",
+ /* 19 */ "key",
+ /* 20 */ "seq", /* Used by: index_list */
+ /* 21 */ "name",
+ /* 22 */ "unique",
+ /* 23 */ "origin",
+ /* 24 */ "partial",
+ /* 25 */ "seq", /* Used by: database_list */
+ /* 26 */ "name",
+ /* 27 */ "file",
+ /* 28 */ "seq", /* Used by: collation_list */
+ /* 29 */ "name",
+ /* 30 */ "id", /* Used by: foreign_key_list */
+ /* 31 */ "seq",
+ /* 32 */ "table",
+ /* 33 */ "from",
+ /* 34 */ "to",
+ /* 35 */ "on_update",
+ /* 36 */ "on_delete",
+ /* 37 */ "match",
+ /* 38 */ "table", /* Used by: foreign_key_check */
+ /* 39 */ "rowid",
+ /* 40 */ "parent",
+ /* 41 */ "fkid",
+ /* 42 */ "busy", /* Used by: wal_checkpoint */
+ /* 43 */ "log",
+ /* 44 */ "checkpointed",
+ /* 45 */ "timeout", /* Used by: busy_timeout */
+ /* 46 */ "database", /* Used by: lock_status */
+ /* 47 */ "status",
+};
+
+/* Definitions of all built-in pragmas */
+typedef struct PragmaName {
+ const char *const zName; /* Name of pragma */
+ u8 ePragTyp; /* PragTyp_XXX value */
+ u8 mPragFlg; /* Zero or more PragFlg_XXX values */
+ u8 iPragCName; /* Start of column names in pragCName[] */
+ u8 nPragCName; /* Num of col names. 0 means use pragma name */
+ u32 iArg; /* Extra argument */
+} PragmaName;
+static const PragmaName aPragmaName[] = {
#if defined(SQLITE_HAS_CODEC) || defined(SQLITE_ENABLE_CEROD)
- { /* zName: */ "activate_extensions",
- /* ePragTyp: */ PragTyp_ACTIVATE_EXTENSIONS,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "activate_extensions",
+ /* ePragTyp: */ PragTyp_ACTIVATE_EXTENSIONS,
+ /* ePragFlg: */ 0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
- { /* zName: */ "application_id",
- /* ePragTyp: */ PragTyp_HEADER_VALUE,
- /* ePragFlag: */ 0,
- /* iArg: */ BTREE_APPLICATION_ID },
+ {/* zName: */ "application_id",
+ /* ePragTyp: */ PragTyp_HEADER_VALUE,
+ /* ePragFlg: */ PragFlg_NoColumns1|PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ BTREE_APPLICATION_ID },
#endif
#if !defined(SQLITE_OMIT_AUTOVACUUM)
- { /* zName: */ "auto_vacuum",
- /* ePragTyp: */ PragTyp_AUTO_VACUUM,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
+ {/* zName: */ "auto_vacuum",
+ /* ePragTyp: */ PragTyp_AUTO_VACUUM,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
#if !defined(SQLITE_OMIT_AUTOMATIC_INDEX)
- { /* zName: */ "automatic_index",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_AutoIndex },
-#endif
-#endif
- { /* zName: */ "busy_timeout",
- /* ePragTyp: */ PragTyp_BUSY_TIMEOUT,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "automatic_index",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_AutoIndex },
+#endif
+#endif
+ {/* zName: */ "busy_timeout",
+ /* ePragTyp: */ PragTyp_BUSY_TIMEOUT,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 45, 1,
+ /* iArg: */ 0 },
#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
- { /* zName: */ "cache_size",
- /* ePragTyp: */ PragTyp_CACHE_SIZE,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
+ {/* zName: */ "cache_size",
+ /* ePragTyp: */ PragTyp_CACHE_SIZE,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
- { /* zName: */ "cache_spill",
- /* ePragTyp: */ PragTyp_CACHE_SPILL,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
-#endif
- { /* zName: */ "case_sensitive_like",
- /* ePragTyp: */ PragTyp_CASE_SENSITIVE_LIKE,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
- { /* zName: */ "cell_size_check",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_CellSizeCk },
+ {/* zName: */ "cache_spill",
+ /* ePragTyp: */ PragTyp_CACHE_SPILL,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+ {/* zName: */ "case_sensitive_like",
+ /* ePragTyp: */ PragTyp_CASE_SENSITIVE_LIKE,
+ /* ePragFlg: */ PragFlg_NoColumns,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "cell_size_check",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_CellSizeCk },
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
- { /* zName: */ "checkpoint_fullfsync",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_CkptFullFSync },
+ {/* zName: */ "checkpoint_fullfsync",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_CkptFullFSync },
#endif
#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
- { /* zName: */ "collation_list",
- /* ePragTyp: */ PragTyp_COLLATION_LIST,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "collation_list",
+ /* ePragTyp: */ PragTyp_COLLATION_LIST,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 28, 2,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_COMPILEOPTION_DIAGS)
- { /* zName: */ "compile_options",
- /* ePragTyp: */ PragTyp_COMPILE_OPTIONS,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "compile_options",
+ /* ePragTyp: */ PragTyp_COMPILE_OPTIONS,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
- { /* zName: */ "count_changes",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_CountRows },
+ {/* zName: */ "count_changes",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_CountRows },
#endif
#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_OS_WIN
- { /* zName: */ "data_store_directory",
- /* ePragTyp: */ PragTyp_DATA_STORE_DIRECTORY,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "data_store_directory",
+ /* ePragTyp: */ PragTyp_DATA_STORE_DIRECTORY,
+ /* ePragFlg: */ PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
- { /* zName: */ "data_version",
- /* ePragTyp: */ PragTyp_HEADER_VALUE,
- /* ePragFlag: */ PragFlag_ReadOnly,
- /* iArg: */ BTREE_DATA_VERSION },
+ {/* zName: */ "data_version",
+ /* ePragTyp: */ PragTyp_HEADER_VALUE,
+ /* ePragFlg: */ PragFlg_ReadOnly|PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ BTREE_DATA_VERSION },
#endif
#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
- { /* zName: */ "database_list",
- /* ePragTyp: */ PragTyp_DATABASE_LIST,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
+ {/* zName: */ "database_list",
+ /* ePragTyp: */ PragTyp_DATABASE_LIST,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0,
+ /* ColNames: */ 25, 3,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && !defined(SQLITE_OMIT_DEPRECATED)
- { /* zName: */ "default_cache_size",
- /* ePragTyp: */ PragTyp_DEFAULT_CACHE_SIZE,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
+ {/* zName: */ "default_cache_size",
+ /* ePragTyp: */ PragTyp_DEFAULT_CACHE_SIZE,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 1,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
- { /* zName: */ "defer_foreign_keys",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_DeferFKs },
+ {/* zName: */ "defer_foreign_keys",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_DeferFKs },
#endif
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
- { /* zName: */ "empty_result_callbacks",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_NullCallback },
+ {/* zName: */ "empty_result_callbacks",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_NullCallback },
#endif
#if !defined(SQLITE_OMIT_UTF16)
- { /* zName: */ "encoding",
- /* ePragTyp: */ PragTyp_ENCODING,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "encoding",
+ /* ePragTyp: */ PragTyp_ENCODING,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
- { /* zName: */ "foreign_key_check",
- /* ePragTyp: */ PragTyp_FOREIGN_KEY_CHECK,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
+ {/* zName: */ "foreign_key_check",
+ /* ePragTyp: */ PragTyp_FOREIGN_KEY_CHECK,
+ /* ePragFlg: */ PragFlg_NeedSchema,
+ /* ColNames: */ 38, 4,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FOREIGN_KEY)
- { /* zName: */ "foreign_key_list",
- /* ePragTyp: */ PragTyp_FOREIGN_KEY_LIST,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
+ {/* zName: */ "foreign_key_list",
+ /* ePragTyp: */ PragTyp_FOREIGN_KEY_LIST,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt,
+ /* ColNames: */ 30, 8,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
#if !defined(SQLITE_OMIT_FOREIGN_KEY) && !defined(SQLITE_OMIT_TRIGGER)
- { /* zName: */ "foreign_keys",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_ForeignKeys },
+ {/* zName: */ "foreign_keys",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_ForeignKeys },
#endif
#endif
#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
- { /* zName: */ "freelist_count",
- /* ePragTyp: */ PragTyp_HEADER_VALUE,
- /* ePragFlag: */ PragFlag_ReadOnly,
- /* iArg: */ BTREE_FREE_PAGE_COUNT },
+ {/* zName: */ "freelist_count",
+ /* ePragTyp: */ PragTyp_HEADER_VALUE,
+ /* ePragFlg: */ PragFlg_ReadOnly|PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ BTREE_FREE_PAGE_COUNT },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
- { /* zName: */ "full_column_names",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_FullColNames },
- { /* zName: */ "fullfsync",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_FullFSync },
+ {/* zName: */ "full_column_names",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_FullColNames },
+ {/* zName: */ "fullfsync",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_FullFSync },
#endif
#if defined(SQLITE_HAS_CODEC)
- { /* zName: */ "hexkey",
- /* ePragTyp: */ PragTyp_HEXKEY,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
- { /* zName: */ "hexrekey",
- /* ePragTyp: */ PragTyp_HEXKEY,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "hexkey",
+ /* ePragTyp: */ PragTyp_HEXKEY,
+ /* ePragFlg: */ 0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "hexrekey",
+ /* ePragTyp: */ PragTyp_HEXKEY,
+ /* ePragFlg: */ 0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
#if !defined(SQLITE_OMIT_CHECK)
- { /* zName: */ "ignore_check_constraints",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_IgnoreChecks },
+ {/* zName: */ "ignore_check_constraints",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_IgnoreChecks },
#endif
#endif
#if !defined(SQLITE_OMIT_AUTOVACUUM)
- { /* zName: */ "incremental_vacuum",
- /* ePragTyp: */ PragTyp_INCREMENTAL_VACUUM,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
+ {/* zName: */ "incremental_vacuum",
+ /* ePragTyp: */ PragTyp_INCREMENTAL_VACUUM,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_NoColumns,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
- { /* zName: */ "index_info",
- /* ePragTyp: */ PragTyp_INDEX_INFO,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
- { /* zName: */ "index_list",
- /* ePragTyp: */ PragTyp_INDEX_LIST,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
- { /* zName: */ "index_xinfo",
- /* ePragTyp: */ PragTyp_INDEX_INFO,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 1 },
+ {/* zName: */ "index_info",
+ /* ePragTyp: */ PragTyp_INDEX_INFO,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt,
+ /* ColNames: */ 11, 3,
+ /* iArg: */ 0 },
+ {/* zName: */ "index_list",
+ /* ePragTyp: */ PragTyp_INDEX_LIST,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt,
+ /* ColNames: */ 20, 5,
+ /* iArg: */ 0 },
+ {/* zName: */ "index_xinfo",
+ /* ePragTyp: */ PragTyp_INDEX_INFO,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt,
+ /* ColNames: */ 14, 6,
+ /* iArg: */ 1 },
#endif
#if !defined(SQLITE_OMIT_INTEGRITY_CHECK)
- { /* zName: */ "integrity_check",
- /* ePragTyp: */ PragTyp_INTEGRITY_CHECK,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
+ {/* zName: */ "integrity_check",
+ /* ePragTyp: */ PragTyp_INTEGRITY_CHECK,
+ /* ePragFlg: */ PragFlg_NeedSchema,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
- { /* zName: */ "journal_mode",
- /* ePragTyp: */ PragTyp_JOURNAL_MODE,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
- { /* zName: */ "journal_size_limit",
- /* ePragTyp: */ PragTyp_JOURNAL_SIZE_LIMIT,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "journal_mode",
+ /* ePragTyp: */ PragTyp_JOURNAL_MODE,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "journal_size_limit",
+ /* ePragTyp: */ PragTyp_JOURNAL_SIZE_LIMIT,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if defined(SQLITE_HAS_CODEC)
- { /* zName: */ "key",
- /* ePragTyp: */ PragTyp_KEY,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "key",
+ /* ePragTyp: */ PragTyp_KEY,
+ /* ePragFlg: */ 0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
- { /* zName: */ "legacy_file_format",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_LegacyFileFmt },
+ {/* zName: */ "legacy_file_format",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_LegacyFileFmt },
#endif
#if !defined(SQLITE_OMIT_PAGER_PRAGMAS) && SQLITE_ENABLE_LOCKING_STYLE
- { /* zName: */ "lock_proxy_file",
- /* ePragTyp: */ PragTyp_LOCK_PROXY_FILE,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "lock_proxy_file",
+ /* ePragTyp: */ PragTyp_LOCK_PROXY_FILE,
+ /* ePragFlg: */ PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if defined(SQLITE_DEBUG) || defined(SQLITE_TEST)
- { /* zName: */ "lock_status",
- /* ePragTyp: */ PragTyp_LOCK_STATUS,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "lock_status",
+ /* ePragTyp: */ PragTyp_LOCK_STATUS,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 46, 2,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
- { /* zName: */ "locking_mode",
- /* ePragTyp: */ PragTyp_LOCKING_MODE,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
- { /* zName: */ "max_page_count",
- /* ePragTyp: */ PragTyp_PAGE_COUNT,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
- { /* zName: */ "mmap_size",
- /* ePragTyp: */ PragTyp_MMAP_SIZE,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
- { /* zName: */ "page_count",
- /* ePragTyp: */ PragTyp_PAGE_COUNT,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
- { /* zName: */ "page_size",
- /* ePragTyp: */ PragTyp_PAGE_SIZE,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "locking_mode",
+ /* ePragTyp: */ PragTyp_LOCKING_MODE,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "max_page_count",
+ /* ePragTyp: */ PragTyp_PAGE_COUNT,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "mmap_size",
+ /* ePragTyp: */ PragTyp_MMAP_SIZE,
+ /* ePragFlg: */ 0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "page_count",
+ /* ePragTyp: */ PragTyp_PAGE_COUNT,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "page_size",
+ /* ePragTyp: */ PragTyp_PAGE_SIZE,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if defined(SQLITE_DEBUG) && !defined(SQLITE_OMIT_PARSER_TRACE)
- { /* zName: */ "parser_trace",
- /* ePragTyp: */ PragTyp_PARSER_TRACE,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "parser_trace",
+ /* ePragTyp: */ PragTyp_PARSER_TRACE,
+ /* ePragFlg: */ 0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
- { /* zName: */ "query_only",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_QueryOnly },
+ {/* zName: */ "query_only",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_QueryOnly },
#endif
#if !defined(SQLITE_OMIT_INTEGRITY_CHECK)
- { /* zName: */ "quick_check",
- /* ePragTyp: */ PragTyp_INTEGRITY_CHECK,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
+ {/* zName: */ "quick_check",
+ /* ePragTyp: */ PragTyp_INTEGRITY_CHECK,
+ /* ePragFlg: */ PragFlg_NeedSchema,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
- { /* zName: */ "read_uncommitted",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_ReadUncommitted },
- { /* zName: */ "recursive_triggers",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_RecTriggers },
+ {/* zName: */ "read_uncommitted",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_ReadUncommitted },
+ {/* zName: */ "recursive_triggers",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_RecTriggers },
#endif
#if defined(SQLITE_HAS_CODEC)
- { /* zName: */ "rekey",
- /* ePragTyp: */ PragTyp_REKEY,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "rekey",
+ /* ePragTyp: */ PragTyp_REKEY,
+ /* ePragFlg: */ 0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
- { /* zName: */ "reverse_unordered_selects",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_ReverseOrder },
+ {/* zName: */ "reverse_unordered_selects",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_ReverseOrder },
#endif
#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
- { /* zName: */ "schema_version",
- /* ePragTyp: */ PragTyp_HEADER_VALUE,
- /* ePragFlag: */ 0,
- /* iArg: */ BTREE_SCHEMA_VERSION },
+ {/* zName: */ "schema_version",
+ /* ePragTyp: */ PragTyp_HEADER_VALUE,
+ /* ePragFlg: */ PragFlg_NoColumns1|PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ BTREE_SCHEMA_VERSION },
#endif
#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
- { /* zName: */ "secure_delete",
- /* ePragTyp: */ PragTyp_SECURE_DELETE,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "secure_delete",
+ /* ePragTyp: */ PragTyp_SECURE_DELETE,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
- { /* zName: */ "short_column_names",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_ShortColNames },
-#endif
- { /* zName: */ "shrink_memory",
- /* ePragTyp: */ PragTyp_SHRINK_MEMORY,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
- { /* zName: */ "soft_heap_limit",
- /* ePragTyp: */ PragTyp_SOFT_HEAP_LIMIT,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "short_column_names",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_ShortColNames },
+#endif
+ {/* zName: */ "shrink_memory",
+ /* ePragTyp: */ PragTyp_SHRINK_MEMORY,
+ /* ePragFlg: */ PragFlg_NoColumns,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "soft_heap_limit",
+ /* ePragTyp: */ PragTyp_SOFT_HEAP_LIMIT,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
#if defined(SQLITE_DEBUG)
- { /* zName: */ "sql_trace",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_SqlTrace },
+ {/* zName: */ "sql_trace",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_SqlTrace },
#endif
#endif
#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
- { /* zName: */ "stats",
- /* ePragTyp: */ PragTyp_STATS,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
+ {/* zName: */ "stats",
+ /* ePragTyp: */ PragTyp_STATS,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq,
+ /* ColNames: */ 7, 4,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
- { /* zName: */ "synchronous",
- /* ePragTyp: */ PragTyp_SYNCHRONOUS,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
+ {/* zName: */ "synchronous",
+ /* ePragTyp: */ PragTyp_SYNCHRONOUS,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result0|PragFlg_SchemaReq|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_SCHEMA_PRAGMAS)
- { /* zName: */ "table_info",
- /* ePragTyp: */ PragTyp_TABLE_INFO,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
+ {/* zName: */ "table_info",
+ /* ePragTyp: */ PragTyp_TABLE_INFO,
+ /* ePragFlg: */ PragFlg_NeedSchema|PragFlg_Result1|PragFlg_SchemaOpt,
+ /* ColNames: */ 1, 6,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_PAGER_PRAGMAS)
- { /* zName: */ "temp_store",
- /* ePragTyp: */ PragTyp_TEMP_STORE,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
- { /* zName: */ "temp_store_directory",
- /* ePragTyp: */ PragTyp_TEMP_STORE_DIRECTORY,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
-#endif
- { /* zName: */ "threads",
- /* ePragTyp: */ PragTyp_THREADS,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
+ {/* zName: */ "temp_store",
+ /* ePragTyp: */ PragTyp_TEMP_STORE,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "temp_store_directory",
+ /* ePragTyp: */ PragTyp_TEMP_STORE_DIRECTORY,
+ /* ePragFlg: */ PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+#endif
+ {/* zName: */ "threads",
+ /* ePragTyp: */ PragTyp_THREADS,
+ /* ePragFlg: */ PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
#if !defined(SQLITE_OMIT_SCHEMA_VERSION_PRAGMAS)
- { /* zName: */ "user_version",
- /* ePragTyp: */ PragTyp_HEADER_VALUE,
- /* ePragFlag: */ 0,
- /* iArg: */ BTREE_USER_VERSION },
+ {/* zName: */ "user_version",
+ /* ePragTyp: */ PragTyp_HEADER_VALUE,
+ /* ePragFlg: */ PragFlg_NoColumns1|PragFlg_Result0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ BTREE_USER_VERSION },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
#if defined(SQLITE_DEBUG)
- { /* zName: */ "vdbe_addoptrace",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_VdbeAddopTrace },
- { /* zName: */ "vdbe_debug",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_SqlTrace|SQLITE_VdbeListing|SQLITE_VdbeTrace },
- { /* zName: */ "vdbe_eqp",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_VdbeEQP },
- { /* zName: */ "vdbe_listing",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_VdbeListing },
- { /* zName: */ "vdbe_trace",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_VdbeTrace },
+ {/* zName: */ "vdbe_addoptrace",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_VdbeAddopTrace },
+ {/* zName: */ "vdbe_debug",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_SqlTrace|SQLITE_VdbeListing|SQLITE_VdbeTrace },
+ {/* zName: */ "vdbe_eqp",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_VdbeEQP },
+ {/* zName: */ "vdbe_listing",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_VdbeListing },
+ {/* zName: */ "vdbe_trace",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_VdbeTrace },
#endif
#endif
#if !defined(SQLITE_OMIT_WAL)
- { /* zName: */ "wal_autocheckpoint",
- /* ePragTyp: */ PragTyp_WAL_AUTOCHECKPOINT,
- /* ePragFlag: */ 0,
- /* iArg: */ 0 },
- { /* zName: */ "wal_checkpoint",
- /* ePragTyp: */ PragTyp_WAL_CHECKPOINT,
- /* ePragFlag: */ PragFlag_NeedSchema,
- /* iArg: */ 0 },
+ {/* zName: */ "wal_autocheckpoint",
+ /* ePragTyp: */ PragTyp_WAL_AUTOCHECKPOINT,
+ /* ePragFlg: */ 0,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ 0 },
+ {/* zName: */ "wal_checkpoint",
+ /* ePragTyp: */ PragTyp_WAL_CHECKPOINT,
+ /* ePragFlg: */ PragFlg_NeedSchema,
+ /* ColNames: */ 42, 3,
+ /* iArg: */ 0 },
#endif
#if !defined(SQLITE_OMIT_FLAG_PRAGMAS)
- { /* zName: */ "writable_schema",
- /* ePragTyp: */ PragTyp_FLAG,
- /* ePragFlag: */ 0,
- /* iArg: */ SQLITE_WriteSchema|SQLITE_RecoveryMode },
+ {/* zName: */ "writable_schema",
+ /* ePragTyp: */ PragTyp_FLAG,
+ /* ePragFlg: */ PragFlg_Result0|PragFlg_NoColumns1,
+ /* ColNames: */ 0, 0,
+ /* iArg: */ SQLITE_WriteSchema|SQLITE_RecoveryMode },
#endif
};
/* Number of pragmas: 60 on by default, 73 total. */
@@ -111689,29 +112809,29 @@ static int changeTempStorage(Parse *pParse, const char *zStorageType){
#endif /* SQLITE_PAGER_PRAGMAS */
/*
-** Set the names of the first N columns to the values in azCol[]
+** Set result column names for a pragma.
*/
-static void setAllColumnNames(
- Vdbe *v, /* The query under construction */
- int N, /* Number of columns */
- const char **azCol /* Names of columns */
+static void setPragmaResultColumnNames(
+ Vdbe *v, /* The query under construction */
+ const PragmaName *pPragma /* The pragma */
){
- int i;
- sqlite3VdbeSetNumCols(v, N);
- for(i=0; i<N; i++){
- sqlite3VdbeSetColName(v, i, COLNAME_NAME, azCol[i], SQLITE_STATIC);
+ u8 n = pPragma->nPragCName;
+ sqlite3VdbeSetNumCols(v, n==0 ? 1 : n);
+ if( n==0 ){
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, pPragma->zName, SQLITE_STATIC);
+ }else{
+ int i, j;
+ for(i=0, j=pPragma->iPragCName; i<n; i++, j++){
+ sqlite3VdbeSetColName(v, i, COLNAME_NAME, pragCName[j], SQLITE_STATIC);
+ }
}
}
-static void setOneColumnName(Vdbe *v, const char *z){
- setAllColumnNames(v, 1, &z);
-}
/*
** Generate code to return a single integer value.
*/
-static void returnSingleInt(Vdbe *v, const char *zLabel, i64 value){
+static void returnSingleInt(Vdbe *v, i64 value){
sqlite3VdbeAddOp4Dup8(v, OP_Int64, 0, 1, 0, (const u8*)&value, P4_INT64);
- setOneColumnName(v, zLabel);
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
}
@@ -111720,12 +112840,10 @@ static void returnSingleInt(Vdbe *v, const char *zLabel, i64 value){
*/
static void returnSingleText(
Vdbe *v, /* Prepared statement under construction */
- const char *zLabel, /* Name of the result column */
const char *zValue /* Value to be returned */
){
if( zValue ){
sqlite3VdbeLoadString(v, 1, (const char*)zValue);
- setOneColumnName(v, zLabel);
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
}
}
@@ -111804,6 +112922,26 @@ SQLITE_PRIVATE const char *sqlite3JournalModename(int eMode){
}
/*
+** Locate a pragma in the aPragmaName[] array.
+*/
+static const PragmaName *pragmaLocate(const char *zName){
+ int upr, lwr, mid = 0, rc;
+ lwr = 0;
+ upr = ArraySize(aPragmaName)-1;
+ while( lwr<=upr ){
+ mid = (lwr+upr)/2;
+ rc = sqlite3_stricmp(zName, aPragmaName[mid].zName);
+ if( rc==0 ) break;
+ if( rc<0 ){
+ upr = mid - 1;
+ }else{
+ lwr = mid + 1;
+ }
+ }
+ return lwr>upr ? 0 : &aPragmaName[mid];
+}
+
+/*
** Process a pragma statement.
**
** Pragmas are of this form:
@@ -111831,12 +112969,11 @@ SQLITE_PRIVATE void sqlite3Pragma(
Token *pId; /* Pointer to <id> token */
char *aFcntl[4]; /* Argument to SQLITE_FCNTL_PRAGMA */
int iDb; /* Database index for <database> */
- int lwr, upr, mid = 0; /* Binary search bounds */
int rc; /* return value from SQLITE_FCNTL_PRAGMA */
sqlite3 *db = pParse->db; /* The database connection */
Db *pDb; /* The specific database being pragmaed */
Vdbe *v = sqlite3GetVdbe(pParse); /* Prepared statement */
- const struct sPragmaNames *pPragma;
+ const PragmaName *pPragma; /* The pragma */
if( v==0 ) return;
sqlite3VdbeRunOnlyOnce(v);
@@ -111891,7 +113028,9 @@ SQLITE_PRIVATE void sqlite3Pragma(
db->busyHandler.nBusy = 0;
rc = sqlite3_file_control(db, zDb, SQLITE_FCNTL_PRAGMA, (void*)aFcntl);
if( rc==SQLITE_OK ){
- returnSingleText(v, "result", aFcntl[0]);
+ sqlite3VdbeSetNumCols(v, 1);
+ sqlite3VdbeSetColName(v, 0, COLNAME_NAME, aFcntl[0], SQLITE_TRANSIENT);
+ returnSingleText(v, aFcntl[0]);
sqlite3_free(aFcntl[0]);
goto pragma_out;
}
@@ -111906,26 +113045,21 @@ SQLITE_PRIVATE void sqlite3Pragma(
}
/* Locate the pragma in the lookup table */
- lwr = 0;
- upr = ArraySize(aPragmaNames)-1;
- while( lwr<=upr ){
- mid = (lwr+upr)/2;
- rc = sqlite3_stricmp(zLeft, aPragmaNames[mid].zName);
- if( rc==0 ) break;
- if( rc<0 ){
- upr = mid - 1;
- }else{
- lwr = mid + 1;
- }
- }
- if( lwr>upr ) goto pragma_out;
- pPragma = &aPragmaNames[mid];
+ pPragma = pragmaLocate(zLeft);
+ if( pPragma==0 ) goto pragma_out;
/* Make sure the database schema is loaded if the pragma requires that */
- if( (pPragma->mPragFlag & PragFlag_NeedSchema)!=0 ){
+ if( (pPragma->mPragFlg & PragFlg_NeedSchema)!=0 ){
if( sqlite3ReadSchema(pParse) ) goto pragma_out;
}
+ /* Register the result column names for pragmas that return results */
+ if( (pPragma->mPragFlg & PragFlg_NoColumns)==0
+ && ((pPragma->mPragFlg & PragFlg_NoColumns1)==0 || zRight==0)
+ ){
+ setPragmaResultColumnNames(v, pPragma);
+ }
+
/* Jump to the appropriate pragma handler */
switch( pPragma->ePragTyp ){
@@ -111962,7 +113096,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
VdbeOp *aOp;
sqlite3VdbeUsesBtree(v, iDb);
if( !zRight ){
- setOneColumnName(v, "cache_size");
pParse->nMem += 2;
sqlite3VdbeVerifyNoMallocRequired(v, ArraySize(getCacheSize));
aOp = sqlite3VdbeAddOpList(v, ArraySize(getCacheSize), getCacheSize, iLn);
@@ -111997,7 +113130,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
assert( pBt!=0 );
if( !zRight ){
int size = ALWAYS(pBt) ? sqlite3BtreeGetPageSize(pBt) : 0;
- returnSingleInt(v, "page_size", size);
+ returnSingleInt(v, size);
}else{
/* Malloc may fail when setting the page-size, as there is an internal
** buffer that the pager module resizes using sqlite3_realloc().
@@ -112032,7 +113165,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
}
}
b = sqlite3BtreeSecureDelete(pBt, b);
- returnSingleInt(v, "secure_delete", b);
+ returnSingleInt(v, b);
break;
}
@@ -112064,8 +113197,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
sqlite3AbsInt32(sqlite3Atoi(zRight)));
}
sqlite3VdbeAddOp2(v, OP_ResultRow, iReg, 1);
- sqlite3VdbeSetNumCols(v, 1);
- sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLeft, SQLITE_TRANSIENT);
break;
}
@@ -112111,7 +113242,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
if( eMode==PAGER_LOCKINGMODE_EXCLUSIVE ){
zRet = "exclusive";
}
- returnSingleText(v, "locking_mode", zRet);
+ returnSingleText(v, zRet);
break;
}
@@ -112124,7 +113255,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
int eMode; /* One of the PAGER_JOURNALMODE_XXX symbols */
int ii; /* Loop counter */
- setOneColumnName(v, "journal_mode");
if( zRight==0 ){
/* If there is no "=MODE" part of the pragma, do a query for the
** current mode */
@@ -112170,7 +113300,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
if( iLimit<-1 ) iLimit = -1;
}
iLimit = sqlite3PagerJournalSizeLimit(pPager, iLimit);
- returnSingleInt(v, "journal_size_limit", iLimit);
+ returnSingleInt(v, iLimit);
break;
}
@@ -112188,7 +113318,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
Btree *pBt = pDb->pBt;
assert( pBt!=0 );
if( !zRight ){
- returnSingleInt(v, "auto_vacuum", sqlite3BtreeGetAutoVacuum(pBt));
+ returnSingleInt(v, sqlite3BtreeGetAutoVacuum(pBt));
}else{
int eAuto = getAutoVacuum(zRight);
assert( eAuto>=0 && eAuto<=2 );
@@ -112267,7 +113397,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
case PragTyp_CACHE_SIZE: {
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
if( !zRight ){
- returnSingleInt(v, "cache_size", pDb->pSchema->cache_size);
+ returnSingleInt(v, pDb->pSchema->cache_size);
}else{
int size = sqlite3Atoi(zRight);
pDb->pSchema->cache_size = size;
@@ -112301,7 +113431,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
case PragTyp_CACHE_SPILL: {
assert( sqlite3SchemaMutexHeld(db, iDb, 0) );
if( !zRight ){
- returnSingleInt(v, "cache_spill",
+ returnSingleInt(v,
(db->flags & SQLITE_CacheSpill)==0 ? 0 :
sqlite3BtreeSetSpillSize(pDb->pBt,0));
}else{
@@ -112355,7 +113485,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
rc = SQLITE_OK;
#endif
if( rc==SQLITE_OK ){
- returnSingleInt(v, "mmap_size", sz);
+ returnSingleInt(v, sz);
}else if( rc!=SQLITE_NOTFOUND ){
pParse->nErr++;
pParse->rc = rc;
@@ -112376,7 +113506,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
*/
case PragTyp_TEMP_STORE: {
if( !zRight ){
- returnSingleInt(v, "temp_store", db->temp_store);
+ returnSingleInt(v, db->temp_store);
}else{
changeTempStorage(pParse, zRight);
}
@@ -112395,7 +113525,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
*/
case PragTyp_TEMP_STORE_DIRECTORY: {
if( !zRight ){
- returnSingleText(v, "temp_store_directory", sqlite3_temp_directory);
+ returnSingleText(v, sqlite3_temp_directory);
}else{
#ifndef SQLITE_OMIT_WSD
if( zRight[0] ){
@@ -112439,7 +113569,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
*/
case PragTyp_DATA_STORE_DIRECTORY: {
if( !zRight ){
- returnSingleText(v, "data_store_directory", sqlite3_data_directory);
+ returnSingleText(v, sqlite3_data_directory);
}else{
#ifndef SQLITE_OMIT_WSD
if( zRight[0] ){
@@ -112478,7 +113608,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
sqlite3_file *pFile = sqlite3PagerFile(pPager);
sqlite3OsFileControlHint(pFile, SQLITE_GET_LOCKPROXYFILE,
&proxy_file_path);
- returnSingleText(v, "lock_proxy_file", proxy_file_path);
+ returnSingleText(v, proxy_file_path);
}else{
Pager *pPager = sqlite3BtreePager(pDb->pBt);
sqlite3_file *pFile = sqlite3PagerFile(pPager);
@@ -112510,7 +113640,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
*/
case PragTyp_SYNCHRONOUS: {
if( !zRight ){
- returnSingleInt(v, "synchronous", pDb->safety_level-1);
+ returnSingleInt(v, pDb->safety_level-1);
}else{
if( !db->autoCommit ){
sqlite3ErrorMsg(pParse,
@@ -112530,7 +113660,8 @@ SQLITE_PRIVATE void sqlite3Pragma(
#ifndef SQLITE_OMIT_FLAG_PRAGMAS
case PragTyp_FLAG: {
if( zRight==0 ){
- returnSingleInt(v, pPragma->zName, (db->flags & pPragma->iArg)!=0 );
+ setPragmaResultColumnNames(v, pPragma);
+ returnSingleInt(v, (db->flags & pPragma->iArg)!=0 );
}else{
int mask = pPragma->iArg; /* Mask of bits to set or clear. */
if( db->autoCommit==0 ){
@@ -112580,16 +113711,12 @@ SQLITE_PRIVATE void sqlite3Pragma(
Table *pTab;
pTab = sqlite3LocateTable(pParse, LOCATE_NOERR, zRight, zDb);
if( pTab ){
- static const char *azCol[] = {
- "cid", "name", "type", "notnull", "dflt_value", "pk"
- };
int i, k;
int nHidden = 0;
Column *pCol;
Index *pPk = sqlite3PrimaryKeyIndex(pTab);
pParse->nMem = 6;
sqlite3CodeVerifySchema(pParse, iDb);
- setAllColumnNames(v, 6, azCol); assert( 6==ArraySize(azCol) );
sqlite3ViewGetColumnNames(pParse, pTab);
for(i=0, pCol=pTab->aCol; i<pTab->nCol; i++, pCol++){
if( IsHiddenColumn(pCol) ){
@@ -112618,13 +113745,10 @@ SQLITE_PRIVATE void sqlite3Pragma(
break;
case PragTyp_STATS: {
- static const char *azCol[] = { "table", "index", "width", "height" };
Index *pIdx;
HashElem *i;
- v = sqlite3GetVdbe(pParse);
pParse->nMem = 4;
sqlite3CodeVerifySchema(pParse, iDb);
- setAllColumnNames(v, 4, azCol); assert( 4==ArraySize(azCol) );
for(i=sqliteHashFirst(&pDb->pSchema->tblHash); i; i=sqliteHashNext(i)){
Table *pTab = sqliteHashData(i);
sqlite3VdbeMultiLoad(v, 1, "ssii",
@@ -112649,9 +113773,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
Table *pTab;
pIdx = sqlite3FindIndex(db, zRight, zDb);
if( pIdx ){
- static const char *azCol[] = {
- "seqno", "cid", "name", "desc", "coll", "key"
- };
int i;
int mx;
if( pPragma->iArg ){
@@ -112665,8 +113786,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
}
pTab = pIdx->pTable;
sqlite3CodeVerifySchema(pParse, iDb);
- assert( pParse->nMem<=ArraySize(azCol) );
- setAllColumnNames(v, pParse->nMem, azCol);
+ assert( pParse->nMem<=pPragma->nPragCName );
for(i=0; i<mx; i++){
i16 cnum = pIdx->aiColumn[i];
sqlite3VdbeMultiLoad(v, 1, "iis", i, cnum,
@@ -112689,13 +113809,8 @@ SQLITE_PRIVATE void sqlite3Pragma(
int i;
pTab = sqlite3FindTable(db, zRight, zDb);
if( pTab ){
- static const char *azCol[] = {
- "seq", "name", "unique", "origin", "partial"
- };
- v = sqlite3GetVdbe(pParse);
pParse->nMem = 5;
sqlite3CodeVerifySchema(pParse, iDb);
- setAllColumnNames(v, 5, azCol); assert( 5==ArraySize(azCol) );
for(pIdx=pTab->pIndex, i=0; pIdx; pIdx=pIdx->pNext, i++){
const char *azOrigin[] = { "c", "u", "pk" };
sqlite3VdbeMultiLoad(v, 1, "isisi",
@@ -112711,10 +113826,8 @@ SQLITE_PRIVATE void sqlite3Pragma(
break;
case PragTyp_DATABASE_LIST: {
- static const char *azCol[] = { "seq", "name", "file" };
int i;
pParse->nMem = 3;
- setAllColumnNames(v, 3, azCol); assert( 3==ArraySize(azCol) );
for(i=0; i<db->nDb; i++){
if( db->aDb[i].pBt==0 ) continue;
assert( db->aDb[i].zDbSName!=0 );
@@ -112728,11 +113841,9 @@ SQLITE_PRIVATE void sqlite3Pragma(
break;
case PragTyp_COLLATION_LIST: {
- static const char *azCol[] = { "seq", "name" };
int i = 0;
HashElem *p;
pParse->nMem = 2;
- setAllColumnNames(v, 2, azCol); assert( 2==ArraySize(azCol) );
for(p=sqliteHashFirst(&db->aCollSeq); p; p=sqliteHashNext(p)){
CollSeq *pColl = (CollSeq *)sqliteHashData(p);
sqlite3VdbeMultiLoad(v, 1, "is", i++, pColl->zName);
@@ -112748,17 +113859,11 @@ SQLITE_PRIVATE void sqlite3Pragma(
Table *pTab;
pTab = sqlite3FindTable(db, zRight, zDb);
if( pTab ){
- v = sqlite3GetVdbe(pParse);
pFK = pTab->pFKey;
if( pFK ){
- static const char *azCol[] = {
- "id", "seq", "table", "from", "to", "on_update", "on_delete",
- "match"
- };
int i = 0;
pParse->nMem = 8;
sqlite3CodeVerifySchema(pParse, iDb);
- setAllColumnNames(v, 8, azCol); assert( 8==ArraySize(azCol) );
while(pFK){
int j;
for(j=0; j<pFK->nCol; j++){
@@ -112799,14 +113904,11 @@ SQLITE_PRIVATE void sqlite3Pragma(
int addrTop; /* Top of a loop checking foreign keys */
int addrOk; /* Jump here if the key is OK */
int *aiCols; /* child to parent column mapping */
- static const char *azCol[] = { "table", "rowid", "parent", "fkid" };
regResult = pParse->nMem+1;
pParse->nMem += 4;
regKey = ++pParse->nMem;
regRow = ++pParse->nMem;
- v = sqlite3GetVdbe(pParse);
- setAllColumnNames(v, 4, azCol); assert( 4==ArraySize(azCol) );
sqlite3CodeVerifySchema(pParse, iDb);
k = sqliteHashFirst(&db->aDb[iDb].pSchema->tblHash);
while( k ){
@@ -112945,7 +114047,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
/* Initialize the VDBE program */
pParse->nMem = 6;
- setOneColumnName(v, "integrity_check");
/* Set the maximum error count */
mxErr = SQLITE_INTEGRITY_CHECK_ERROR_MAX;
@@ -113197,7 +114298,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
assert( encnames[SQLITE_UTF8].enc==SQLITE_UTF8 );
assert( encnames[SQLITE_UTF16LE].enc==SQLITE_UTF16LE );
assert( encnames[SQLITE_UTF16BE].enc==SQLITE_UTF16BE );
- returnSingleText(v, "encoding", encnames[ENC(pParse->db)].zName);
+ returnSingleText(v, encnames[ENC(pParse->db)].zName);
}else{ /* "PRAGMA encoding = XXX" */
/* Only change the value of sqlite.enc if the database handle is not
** initialized. If the main database exists, the new sqlite.enc value
@@ -113260,7 +114361,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
case PragTyp_HEADER_VALUE: {
int iCookie = pPragma->iArg; /* Which cookie to read or write */
sqlite3VdbeUsesBtree(v, iDb);
- if( zRight && (pPragma->mPragFlag & PragFlag_ReadOnly)==0 ){
+ if( zRight && (pPragma->mPragFlg & PragFlg_ReadOnly)==0 ){
/* Write the specified cookie value */
static const VdbeOpList setCookie[] = {
{ OP_Transaction, 0, 1, 0}, /* 0 */
@@ -113288,8 +114389,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
aOp[0].p1 = iDb;
aOp[1].p1 = iDb;
aOp[1].p3 = iCookie;
- sqlite3VdbeSetNumCols(v, 1);
- sqlite3VdbeSetColName(v, 0, COLNAME_NAME, zLeft, SQLITE_TRANSIENT);
sqlite3VdbeReusable(v);
}
}
@@ -113307,7 +114406,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
int i = 0;
const char *zOpt;
pParse->nMem = 1;
- setOneColumnName(v, "compile_option");
while( (zOpt = sqlite3_compileoption_get(i++))!=0 ){
sqlite3VdbeLoadString(v, 1, zOpt);
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 1);
@@ -113324,7 +114422,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
** Checkpoint the database.
*/
case PragTyp_WAL_CHECKPOINT: {
- static const char *azCol[] = { "busy", "log", "checkpointed" };
int iBt = (pId2->z?iDb:SQLITE_MAX_ATTACHED);
int eMode = SQLITE_CHECKPOINT_PASSIVE;
if( zRight ){
@@ -113336,7 +114433,6 @@ SQLITE_PRIVATE void sqlite3Pragma(
eMode = SQLITE_CHECKPOINT_TRUNCATE;
}
}
- setAllColumnNames(v, 3, azCol); assert( 3==ArraySize(azCol) );
pParse->nMem = 3;
sqlite3VdbeAddOp3(v, OP_Checkpoint, iBt, eMode, 1);
sqlite3VdbeAddOp2(v, OP_ResultRow, 1, 3);
@@ -113355,7 +114451,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
if( zRight ){
sqlite3_wal_autocheckpoint(db, sqlite3Atoi(zRight));
}
- returnSingleInt(v, "wal_autocheckpoint",
+ returnSingleInt(v,
db->xWalCallback==sqlite3WalDefaultHook ?
SQLITE_PTR_TO_INT(db->pWalArg) : 0);
}
@@ -113388,7 +114484,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
if( zRight ){
sqlite3_busy_timeout(db, sqlite3Atoi(zRight));
}
- returnSingleInt(v, "timeout", db->busyTimeout);
+ returnSingleInt(v, db->busyTimeout);
break;
}
@@ -113408,7 +114504,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
if( zRight && sqlite3DecOrHexToI64(zRight, &N)==SQLITE_OK ){
sqlite3_soft_heap_limit64(N);
}
- returnSingleInt(v, "soft_heap_limit", sqlite3_soft_heap_limit64(-1));
+ returnSingleInt(v, sqlite3_soft_heap_limit64(-1));
break;
}
@@ -113427,8 +114523,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
){
sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, (int)(N&0x7fffffff));
}
- returnSingleInt(v, "threads",
- sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, -1));
+ returnSingleInt(v, sqlite3_limit(db, SQLITE_LIMIT_WORKER_THREADS, -1));
break;
}
@@ -113440,9 +114535,7 @@ SQLITE_PRIVATE void sqlite3Pragma(
static const char *const azLockName[] = {
"unlocked", "shared", "reserved", "pending", "exclusive"
};
- static const char *azCol[] = { "database", "status" };
int i;
- setAllColumnNames(v, 2, azCol); assert( 2==ArraySize(azCol) );
pParse->nMem = 2;
for(i=0; i<db->nDb; i++){
Btree *pBt;
@@ -113508,10 +114601,325 @@ SQLITE_PRIVATE void sqlite3Pragma(
} /* End of the PRAGMA switch */
+ /* The following block is a no-op unless SQLITE_DEBUG is defined. Its only
+ ** purpose is to execute assert() statements to verify that if the
+ ** PragFlg_NoColumns1 flag is set and the caller specified an argument
+ ** to the PRAGMA, the implementation has not added any OP_ResultRow
+ ** instructions to the VM. */
+ if( (pPragma->mPragFlg & PragFlg_NoColumns1) && zRight ){
+ sqlite3VdbeVerifyNoResultRow(v);
+ }
+
pragma_out:
sqlite3DbFree(db, zLeft);
sqlite3DbFree(db, zRight);
}
+#ifndef SQLITE_OMIT_VIRTUALTABLE
+/*****************************************************************************
+** Implementation of an eponymous virtual table that runs a pragma.
+**
+*/
+typedef struct PragmaVtab PragmaVtab;
+typedef struct PragmaVtabCursor PragmaVtabCursor;
+struct PragmaVtab {
+ sqlite3_vtab base; /* Base class. Must be first */
+ sqlite3 *db; /* The database connection to which it belongs */
+ const PragmaName *pName; /* Name of the pragma */
+ u8 nHidden; /* Number of hidden columns */
+ u8 iHidden; /* Index of the first hidden column */
+};
+struct PragmaVtabCursor {
+ sqlite3_vtab_cursor base; /* Base class. Must be first */
+ sqlite3_stmt *pPragma; /* The pragma statement to run */
+ sqlite_int64 iRowid; /* Current rowid */
+ char *azArg[2]; /* Value of the argument and schema */
+};
+
+/*
+** Pragma virtual table module xConnect method.
+*/
+static int pragmaVtabConnect(
+ sqlite3 *db,
+ void *pAux,
+ int argc, const char *const*argv,
+ sqlite3_vtab **ppVtab,
+ char **pzErr
+){
+ const PragmaName *pPragma = (const PragmaName*)pAux;
+ PragmaVtab *pTab = 0;
+ int rc;
+ int i, j;
+ char cSep = '(';
+ StrAccum acc;
+ char zBuf[200];
+
+ UNUSED_PARAMETER(argc);
+ UNUSED_PARAMETER(argv);
+ sqlite3StrAccumInit(&acc, 0, zBuf, sizeof(zBuf), 0);
+ sqlite3StrAccumAppendAll(&acc, "CREATE TABLE x");
+ for(i=0, j=pPragma->iPragCName; i<pPragma->nPragCName; i++, j++){
+ sqlite3XPrintf(&acc, "%c\"%s\"", cSep, pragCName[j]);
+ cSep = ',';
+ }
+ if( i==0 ){
+ sqlite3XPrintf(&acc, "(\"%s\"", pPragma->zName);
+ cSep = ',';
+ i++;
+ }
+ j = 0;
+ if( pPragma->mPragFlg & PragFlg_Result1 ){
+ sqlite3StrAccumAppendAll(&acc, ",arg HIDDEN");
+ j++;
+ }
+ if( pPragma->mPragFlg & (PragFlg_SchemaOpt|PragFlg_SchemaReq) ){
+ sqlite3StrAccumAppendAll(&acc, ",schema HIDDEN");
+ j++;
+ }
+ sqlite3StrAccumAppend(&acc, ")", 1);
+ sqlite3StrAccumFinish(&acc);
+ assert( strlen(zBuf) < sizeof(zBuf)-1 );
+ rc = sqlite3_declare_vtab(db, zBuf);
+ if( rc==SQLITE_OK ){
+ pTab = (PragmaVtab*)sqlite3_malloc(sizeof(PragmaVtab));
+ if( pTab==0 ){
+ rc = SQLITE_NOMEM;
+ }else{
+ memset(pTab, 0, sizeof(PragmaVtab));
+ pTab->pName = pPragma;
+ pTab->db = db;
+ pTab->iHidden = i;
+ pTab->nHidden = j;
+ }
+ }else{
+ *pzErr = sqlite3_mprintf("%s", sqlite3_errmsg(db));
+ }
+
+ *ppVtab = (sqlite3_vtab*)pTab;
+ return rc;
+}
+
+/*
+** Pragma virtual table module xDisconnect method.
+*/
+static int pragmaVtabDisconnect(sqlite3_vtab *pVtab){
+ PragmaVtab *pTab = (PragmaVtab*)pVtab;
+ sqlite3_free(pTab);
+ return SQLITE_OK;
+}
+
+/* Figure out the best index to use to search a pragma virtual table.
+**
+** There are not really any index choices. But we want to encourage the
+** query planner to give == constraints on as many hidden parameters as
+** possible, and especially on the first hidden parameter. So return a
+** high cost if hidden parameters are unconstrained.
+*/
+static int pragmaVtabBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
+ PragmaVtab *pTab = (PragmaVtab*)tab;
+ const struct sqlite3_index_constraint *pConstraint;
+ int i, j;
+ int seen[2];
+
+ pIdxInfo->estimatedCost = (double)1;
+ if( pTab->nHidden==0 ){ return SQLITE_OK; }
+ pConstraint = pIdxInfo->aConstraint;
+ seen[0] = 0;
+ seen[1] = 0;
+ for(i=0; i<pIdxInfo->nConstraint; i++, pConstraint++){
+ if( pConstraint->usable==0 ) continue;
+ if( pConstraint->op!=SQLITE_INDEX_CONSTRAINT_EQ ) continue;
+ if( pConstraint->iColumn < pTab->iHidden ) continue;
+ j = pConstraint->iColumn - pTab->iHidden;
+ assert( j < 2 );
+ seen[j] = i+1;
+ }
+ if( seen[0]==0 ){
+ pIdxInfo->estimatedCost = (double)2147483647;
+ pIdxInfo->estimatedRows = 2147483647;
+ return SQLITE_OK;
+ }
+ j = seen[0]-1;
+ pIdxInfo->aConstraintUsage[j].argvIndex = 1;
+ pIdxInfo->aConstraintUsage[j].omit = 1;
+ if( seen[1]==0 ) return SQLITE_OK;
+ pIdxInfo->estimatedCost = (double)20;
+ pIdxInfo->estimatedRows = 20;
+ j = seen[1]-1;
+ pIdxInfo->aConstraintUsage[j].argvIndex = 2;
+ pIdxInfo->aConstraintUsage[j].omit = 1;
+ return SQLITE_OK;
+}
+
+/* Create a new cursor for the pragma virtual table */
+static int pragmaVtabOpen(sqlite3_vtab *pVtab, sqlite3_vtab_cursor **ppCursor){
+ PragmaVtabCursor *pCsr;
+ pCsr = (PragmaVtabCursor*)sqlite3_malloc(sizeof(*pCsr));
+ if( pCsr==0 ) return SQLITE_NOMEM;
+ memset(pCsr, 0, sizeof(PragmaVtabCursor));
+ pCsr->base.pVtab = pVtab;
+ *ppCursor = &pCsr->base;
+ return SQLITE_OK;
+}
+
+/* Clear all content from pragma virtual table cursor. */
+static void pragmaVtabCursorClear(PragmaVtabCursor *pCsr){
+ int i;
+ sqlite3_finalize(pCsr->pPragma);
+ pCsr->pPragma = 0;
+ for(i=0; i<ArraySize(pCsr->azArg); i++){
+ sqlite3_free(pCsr->azArg[i]);
+ pCsr->azArg[i] = 0;
+ }
+}
+
+/* Close a pragma virtual table cursor */
+static int pragmaVtabClose(sqlite3_vtab_cursor *cur){
+ PragmaVtabCursor *pCsr = (PragmaVtabCursor*)cur;
+ pragmaVtabCursorClear(pCsr);
+ sqlite3_free(pCsr);
+ return SQLITE_OK;
+}
+
+/* Advance the pragma virtual table cursor to the next row */
+static int pragmaVtabNext(sqlite3_vtab_cursor *pVtabCursor){
+ PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+ int rc = SQLITE_OK;
+
+ /* Increment the xRowid value */
+ pCsr->iRowid++;
+ assert( pCsr->pPragma );
+ if( SQLITE_ROW!=sqlite3_step(pCsr->pPragma) ){
+ rc = sqlite3_finalize(pCsr->pPragma);
+ pCsr->pPragma = 0;
+ pragmaVtabCursorClear(pCsr);
+ }
+ return rc;
+}
+
+/*
+** Pragma virtual table module xFilter method.
+*/
+static int pragmaVtabFilter(
+ sqlite3_vtab_cursor *pVtabCursor,
+ int idxNum, const char *idxStr,
+ int argc, sqlite3_value **argv
+){
+ PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+ PragmaVtab *pTab = (PragmaVtab*)(pVtabCursor->pVtab);
+ int rc;
+ int i, j;
+ StrAccum acc;
+ char *zSql;
+
+ UNUSED_PARAMETER(idxNum);
+ UNUSED_PARAMETER(idxStr);
+ pragmaVtabCursorClear(pCsr);
+ j = (pTab->pName->mPragFlg & PragFlg_Result1)!=0 ? 0 : 1;
+ for(i=0; i<argc; i++, j++){
+ assert( j<ArraySize(pCsr->azArg) );
+ pCsr->azArg[j] = sqlite3_mprintf("%s", sqlite3_value_text(argv[i]));
+ if( pCsr->azArg[j]==0 ){
+ return SQLITE_NOMEM;
+ }
+ }
+ sqlite3StrAccumInit(&acc, 0, 0, 0, pTab->db->aLimit[SQLITE_LIMIT_SQL_LENGTH]);
+ sqlite3StrAccumAppendAll(&acc, "PRAGMA ");
+ if( pCsr->azArg[1] ){
+ sqlite3XPrintf(&acc, "%Q.", pCsr->azArg[1]);
+ }
+ sqlite3StrAccumAppendAll(&acc, pTab->pName->zName);
+ if( pCsr->azArg[0] ){
+ sqlite3XPrintf(&acc, "=%Q", pCsr->azArg[0]);
+ }
+ zSql = sqlite3StrAccumFinish(&acc);
+ if( zSql==0 ) return SQLITE_NOMEM;
+ rc = sqlite3_prepare_v2(pTab->db, zSql, -1, &pCsr->pPragma, 0);
+ sqlite3_free(zSql);
+ if( rc!=SQLITE_OK ){
+ pTab->base.zErrMsg = sqlite3_mprintf("%s", sqlite3_errmsg(pTab->db));
+ return rc;
+ }
+ return pragmaVtabNext(pVtabCursor);
+}
+
+/*
+** Pragma virtual table module xEof method.
+*/
+static int pragmaVtabEof(sqlite3_vtab_cursor *pVtabCursor){
+ PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+ return (pCsr->pPragma==0);
+}
+
+/* The xColumn method simply returns the corresponding column from
+** the PRAGMA.
+*/
+static int pragmaVtabColumn(
+ sqlite3_vtab_cursor *pVtabCursor,
+ sqlite3_context *ctx,
+ int i
+){
+ PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+ PragmaVtab *pTab = (PragmaVtab*)(pVtabCursor->pVtab);
+ if( i<pTab->iHidden ){
+ sqlite3_result_value(ctx, sqlite3_column_value(pCsr->pPragma, i));
+ }else{
+ sqlite3_result_text(ctx, pCsr->azArg[i-pTab->iHidden],-1,SQLITE_TRANSIENT);
+ }
+ return SQLITE_OK;
+}
+
+/*
+** Pragma virtual table module xRowid method.
+*/
+static int pragmaVtabRowid(sqlite3_vtab_cursor *pVtabCursor, sqlite_int64 *p){
+ PragmaVtabCursor *pCsr = (PragmaVtabCursor*)pVtabCursor;
+ *p = pCsr->iRowid;
+ return SQLITE_OK;
+}
+
+/* The pragma virtual table object */
+static const sqlite3_module pragmaVtabModule = {
+ 0, /* iVersion */
+ 0, /* xCreate - create a table */
+ pragmaVtabConnect, /* xConnect - connect to an existing table */
+ pragmaVtabBestIndex, /* xBestIndex - Determine search strategy */
+ pragmaVtabDisconnect, /* xDisconnect - Disconnect from a table */
+ 0, /* xDestroy - Drop a table */
+ pragmaVtabOpen, /* xOpen - open a cursor */
+ pragmaVtabClose, /* xClose - close a cursor */
+ pragmaVtabFilter, /* xFilter - configure scan constraints */
+ pragmaVtabNext, /* xNext - advance a cursor */
+ pragmaVtabEof, /* xEof */
+ pragmaVtabColumn, /* xColumn - read data */
+ pragmaVtabRowid, /* xRowid - read data */
+ 0, /* xUpdate - write data */
+ 0, /* xBegin - begin transaction */
+ 0, /* xSync - sync transaction */
+ 0, /* xCommit - commit transaction */
+ 0, /* xRollback - rollback transaction */
+ 0, /* xFindFunction - function overloading */
+ 0, /* xRename - rename the table */
+ 0, /* xSavepoint */
+ 0, /* xRelease */
+ 0 /* xRollbackTo */
+};
+
+/*
+** Check to see if zTabName is really the name of a pragma. If it is,
+** then register an eponymous virtual table for that pragma and return
+** a pointer to the Module object for the new virtual table.
+*/
+SQLITE_PRIVATE Module *sqlite3PragmaVtabRegister(sqlite3 *db, const char *zName){
+ const PragmaName *pName;
+ assert( sqlite3_strnicmp(zName, "pragma_", 7)==0 );
+ pName = pragmaLocate(zName+7);
+ if( pName==0 ) return 0;
+ if( (pName->mPragFlg & (PragFlg_Result0|PragFlg_Result1))==0 ) return 0;
+ assert( sqlite3HashFind(&db->aModule, zName)==0 );
+ return sqlite3VtabCreateModule(db, zName, &pragmaVtabModule, (void*)pName, 0);
+}
+
+#endif /* SQLITE_OMIT_VIRTUALTABLE */
#endif /* SQLITE_OMIT_PRAGMA */
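The new pragma.c code above (pragmaVtabConnect through sqlite3PragmaVtabRegister) is what lets a built-in PRAGMA be read as an eponymous table-valued function. As a rough standalone sketch — not part of this patch, and assuming a database file named test.db that already contains a table t1 — the following C program reads PRAGMA table_info through that interface:

  #include <stdio.h>
  #include <sqlite3.h>

  int main(void){
    sqlite3 *db = 0;
    sqlite3_stmt *pStmt = 0;
    int rc = sqlite3_open("test.db", &db);   /* "test.db" is an assumed example file */
    if( rc!=SQLITE_OK ){ sqlite3_close(db); return 1; }

    /* Same information as "PRAGMA table_info(t1)", but usable inside an
    ** ordinary SELECT.  The argument could equally be supplied as an equality
    ** constraint on the hidden "arg" column (WHERE arg='t1'), which is the
    ** form pragmaVtabBestIndex maps onto argv[0] of pragmaVtabFilter. */
    rc = sqlite3_prepare_v2(db,
        "SELECT name, type FROM pragma_table_info('t1')", -1, &pStmt, 0);
    if( rc==SQLITE_OK ){
      while( sqlite3_step(pStmt)==SQLITE_ROW ){
        printf("%s %s\n", (const char*)sqlite3_column_text(pStmt, 0),
                          (const char*)sqlite3_column_text(pStmt, 1));
      }
    }
    sqlite3_finalize(pStmt);
    sqlite3_close(db);
    return rc==SQLITE_OK ? 0 : 1;
  }

Internally, pragmaVtabFilter rebuilds the text "PRAGMA table_info='t1'" (see its sqlite3StrAccum construction) and prepares it, so the rows come from the existing pragma implementation rather than from a separate code path.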
@@ -114708,7 +116116,7 @@ static void addWhereTerm(
pE1 = sqlite3CreateColumnExpr(db, pSrc, iLeft, iColLeft);
pE2 = sqlite3CreateColumnExpr(db, pSrc, iRight, iColRight);
- pEq = sqlite3PExpr(pParse, TK_EQ, pE1, pE2, 0);
+ pEq = sqlite3PExpr(pParse, TK_EQ, pE1, pE2);
if( pEq && isOuterJoin ){
ExprSetProperty(pEq, EP_FromJoin);
assert( !ExprHasProperty(pEq, EP_TokenOnly|EP_Reduced) );
@@ -114895,7 +116303,7 @@ static void pushOntoSorter(
int iLimit; /* LIMIT counter */
assert( bSeq==0 || bSeq==1 );
- assert( nData==1 || regData==regOrigData );
+ assert( nData==1 || regData==regOrigData || regOrigData==0 );
if( nPrefixReg ){
assert( nPrefixReg==nExpr+bSeq );
regBase = regData - nExpr - bSeq;
@@ -114907,11 +116315,11 @@ static void pushOntoSorter(
iLimit = pSelect->iOffset ? pSelect->iOffset+1 : pSelect->iLimit;
pSort->labelDone = sqlite3VdbeMakeLabel(v);
sqlite3ExprCodeExprList(pParse, pSort->pOrderBy, regBase, regOrigData,
- SQLITE_ECEL_DUP|SQLITE_ECEL_REF);
+ SQLITE_ECEL_DUP | (regOrigData? SQLITE_ECEL_REF : 0));
if( bSeq ){
sqlite3VdbeAddOp2(v, OP_Sequence, pSort->iECursor, regBase+nExpr);
}
- if( nPrefixReg==0 ){
+ if( nPrefixReg==0 && nData>0 ){
sqlite3ExprCodeMove(pParse, regData, regBase+nExpr+bSeq, nData);
}
sqlite3VdbeAddOp3(v, OP_MakeRecord, regBase+nOBSat, nBase-nOBSat, regRecord);
@@ -114961,7 +116369,8 @@ static void pushOntoSorter(
}else{
op = OP_IdxInsert;
}
- sqlite3VdbeAddOp2(v, op, pSort->iECursor, regRecord);
+ sqlite3VdbeAddOp4Int(v, op, pSort->iECursor, regRecord,
+ regBase+nOBSat, nBase-nOBSat);
if( iLimit ){
int addr;
int r1 = 0;
@@ -114969,7 +116378,7 @@ static void pushOntoSorter(
** register is initialized with value of LIMIT+OFFSET.) After the sorter
** fills up, delete the least entry in the sorter after each insert.
** Thus we never hold more than the LIMIT+OFFSET rows in memory at once */
- addr = sqlite3VdbeAddOp3(v, OP_IfNotZero, iLimit, 0, 1); VdbeCoverage(v);
+ addr = sqlite3VdbeAddOp1(v, OP_IfNotZero, iLimit); VdbeCoverage(v);
sqlite3VdbeAddOp1(v, OP_Last, pSort->iECursor);
if( pSort->bOrderedInnerLoop ){
r1 = ++pParse->nMem;
@@ -115029,7 +116438,8 @@ static void codeDistinct(
r1 = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp4Int(v, OP_Found, iTab, addrRepeat, iMem, N); VdbeCoverage(v);
sqlite3VdbeAddOp3(v, OP_MakeRecord, iMem, N, r1);
- sqlite3VdbeAddOp2(v, OP_IdxInsert, iTab, r1);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iTab, r1, iMem, N);
+ sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
sqlite3ReleaseTempReg(pParse, r1);
}
@@ -115040,7 +116450,7 @@ static void codeDistinct(
** If srcTab is negative, then the pEList expressions
** are evaluated in order to get the data for this row. If srcTab is
** zero or more, then data is pulled from srcTab and pEList is used only
-** to get number columns and the datatype for each column.
+** to get the number of columns and the collation sequence for each column.
*/
static void selectInnerLoop(
Parse *pParse, /* The parser context */
@@ -115055,13 +116465,20 @@ static void selectInnerLoop(
){
Vdbe *v = pParse->pVdbe;
int i;
- int hasDistinct; /* True if the DISTINCT keyword is present */
- int regResult; /* Start of memory holding result set */
+ int hasDistinct; /* True if the DISTINCT keyword is present */
int eDest = pDest->eDest; /* How to dispose of results */
int iParm = pDest->iSDParm; /* First argument to disposal method */
int nResultCol; /* Number of result columns */
int nPrefixReg = 0; /* Number of extra registers before regResult */
+ /* Usually, regResult is the first cell in an array of memory cells
+ ** containing the current result row. In this case regOrig is set to the
+ ** same value. However, if the results are being sent to the sorter, the
+ ** values for any expressions that are also part of the sort-key are omitted
+ ** from this array. In this case regOrig is set to zero. */
+ int regResult; /* Start of memory holding current results */
+ int regOrig; /* Start of memory holding full result (or 0) */
+
assert( v );
assert( pEList!=0 );
hasDistinct = pDistinct ? pDistinct->eTnctType : WHERE_DISTINCT_NOOP;
@@ -115092,7 +116509,7 @@ static void selectInnerLoop(
pParse->nMem += nResultCol;
}
pDest->nSdst = nResultCol;
- regResult = pDest->iSdst;
+ regOrig = regResult = pDest->iSdst;
if( srcTab>=0 ){
for(i=0; i<nResultCol; i++){
sqlite3VdbeAddOp3(v, OP_Column, srcTab, i, regResult+i);
@@ -115108,7 +116525,25 @@ static void selectInnerLoop(
}else{
ecelFlags = 0;
}
- sqlite3ExprCodeExprList(pParse, pEList, regResult, 0, ecelFlags);
+ if( pSort && hasDistinct==0 && eDest!=SRT_EphemTab && eDest!=SRT_Table ){
+ /* For each expression in pEList that is a copy of an expression in
+ ** the ORDER BY clause (pSort->pOrderBy), set the associated
+ ** iOrderByCol value to one more than the index of the ORDER BY
+ ** expression within the sort-key that pushOntoSorter() will generate.
+ ** This allows the pEList field to be omitted from the sorted record,
+ ** saving space and CPU cycles. */
+ ecelFlags |= (SQLITE_ECEL_OMITREF|SQLITE_ECEL_REF);
+ for(i=pSort->nOBSat; i<pSort->pOrderBy->nExpr; i++){
+ int j;
+ if( (j = pSort->pOrderBy->a[i].u.x.iOrderByCol)>0 ){
+ pEList->a[j-1].u.x.iOrderByCol = i+1-pSort->nOBSat;
+ }
+ }
+ regOrig = 0;
+ assert( eDest==SRT_Set || eDest==SRT_Mem
+ || eDest==SRT_Coroutine || eDest==SRT_Output );
+ }
+ nResultCol = sqlite3ExprCodeExprList(pParse,pEList,regResult,0,ecelFlags);
}
/* If the DISTINCT keyword was present on the SELECT statement
@@ -115182,7 +116617,7 @@ static void selectInnerLoop(
int r1;
r1 = sqlite3GetTempReg(pParse);
sqlite3VdbeAddOp3(v, OP_MakeRecord, regResult, nResultCol, r1);
- sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, nResultCol);
sqlite3ReleaseTempReg(pParse, r1);
break;
}
@@ -115219,7 +116654,7 @@ static void selectInnerLoop(
int addr = sqlite3VdbeCurrentAddr(v) + 4;
sqlite3VdbeAddOp4Int(v, OP_Found, iParm+1, addr, r1, 0);
VdbeCoverage(v);
- sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm+1, r1);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm+1, r1,regResult,nResultCol);
assert( pSort==0 );
}
#endif
@@ -115248,14 +116683,14 @@ static void selectInnerLoop(
** does not matter. But there might be a LIMIT clause, in which
** case the order does matter */
pushOntoSorter(
- pParse, pSort, p, regResult, regResult, nResultCol, nPrefixReg);
+ pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg);
}else{
int r1 = sqlite3GetTempReg(pParse);
assert( sqlite3Strlen30(pDest->zAffSdst)==nResultCol );
sqlite3VdbeAddOp4(v, OP_MakeRecord, regResult, nResultCol,
r1, pDest->zAffSdst, nResultCol);
sqlite3ExprCacheAffinityChange(pParse, regResult, nResultCol);
- sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, regResult, nResultCol);
sqlite3ReleaseTempReg(pParse, r1);
}
break;
@@ -115274,11 +116709,12 @@ static void selectInnerLoop(
** memory cells and break out of the scan loop.
*/
case SRT_Mem: {
- assert( nResultCol==pDest->nSdst );
if( pSort ){
+ assert( nResultCol<=pDest->nSdst );
pushOntoSorter(
- pParse, pSort, p, regResult, regResult, nResultCol, nPrefixReg);
+ pParse, pSort, p, regResult, regOrig, nResultCol, nPrefixReg);
}else{
+ assert( nResultCol==pDest->nSdst );
assert( regResult==iParm );
/* The LIMIT clause will jump out of the loop for us */
}
@@ -115291,7 +116727,7 @@ static void selectInnerLoop(
testcase( eDest==SRT_Coroutine );
testcase( eDest==SRT_Output );
if( pSort ){
- pushOntoSorter(pParse, pSort, p, regResult, regResult, nResultCol,
+ pushOntoSorter(pParse, pSort, p, regResult, regOrig, nResultCol,
nPrefixReg);
}else if( eDest==SRT_Coroutine ){
sqlite3VdbeAddOp1(v, OP_Yield, pDest->iSDParm);
@@ -115341,7 +116777,7 @@ static void selectInnerLoop(
}
sqlite3VdbeAddOp2(v, OP_Sequence, iParm, r2+nKey);
sqlite3VdbeAddOp3(v, OP_MakeRecord, r2, nKey+2, r1);
- sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, r1);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, r1, r2, nKey+2);
if( addrTest ) sqlite3VdbeJumpHere(v, addrTest);
sqlite3ReleaseTempReg(pParse, r1);
sqlite3ReleaseTempRange(pParse, r2, nKey+2);
@@ -115576,14 +117012,13 @@ static void generateSortTail(
int iParm = pDest->iSDParm;
int regRow;
int regRowid;
+ int iCol;
int nKey;
int iSortTab; /* Sorter cursor to read from */
int nSortData; /* Trailing values to read from sorter */
int i;
int bSeq; /* True if sorter record includes seq. no. */
-#ifdef SQLITE_ENABLE_EXPLAIN_COMMENTS
struct ExprList_item *aOutEx = p->pEList->a;
-#endif
assert( addrBreak<0 );
if( pSort->labelBkOut ){
@@ -115621,11 +117056,18 @@ static void generateSortTail(
iSortTab = iTab;
bSeq = 1;
}
- for(i=0; i<nSortData; i++){
- sqlite3VdbeAddOp3(v, OP_Column, iSortTab, nKey+bSeq+i, regRow+i);
+ for(i=0, iCol=nKey+bSeq; i<nSortData; i++){
+ int iRead;
+ if( aOutEx[i].u.x.iOrderByCol ){
+ iRead = aOutEx[i].u.x.iOrderByCol-1;
+ }else{
+ iRead = iCol++;
+ }
+ sqlite3VdbeAddOp3(v, OP_Column, iSortTab, iRead, regRow+i);
VdbeComment((v, "%s", aOutEx[i].zName ? aOutEx[i].zName : aOutEx[i].zSpan));
}
switch( eDest ){
+ case SRT_Table:
case SRT_EphemTab: {
sqlite3VdbeAddOp2(v, OP_NewRowid, iParm, regRowid);
sqlite3VdbeAddOp3(v, OP_Insert, iParm, regRow, regRowid);
@@ -115638,7 +117080,7 @@ static void generateSortTail(
sqlite3VdbeAddOp4(v, OP_MakeRecord, regRow, nColumn, regRowid,
pDest->zAffSdst, nColumn);
sqlite3ExprCacheAffinityChange(pParse, regRow, nColumn);
- sqlite3VdbeAddOp2(v, OP_IdxInsert, iParm, regRowid);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iParm, regRowid, regRow, nColumn);
break;
}
case SRT_Mem: {
@@ -116147,7 +117589,7 @@ SQLITE_PRIVATE Table *sqlite3ResultSetOfSelect(Parse *pParse, Select *pSelect){
  /* The sqlite3ResultSetOfSelect() is only used in contexts where lookaside
** is disabled */
assert( db->lookaside.bDisable );
- pTab->nRef = 1;
+ pTab->nTabRef = 1;
pTab->zName = 0;
pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
sqlite3ColumnsFromExprList(pParse, pSelect->pEList, &pTab->nCol, &pTab->aCol);
@@ -116378,6 +117820,7 @@ static void generateWithRecursiveQuery(
/* Process the LIMIT and OFFSET clauses, if they exist */
addrBreak = sqlite3VdbeMakeLabel(v);
+ p->nSelectRow = 320; /* 4 billion rows */
computeLimitRegisters(pParse, p, addrBreak);
pLimit = p->pLimit;
pOffset = p->pOffset;
@@ -116847,7 +118290,7 @@ static int multiSelect(
computeLimitRegisters(pParse, p, iBreak);
sqlite3VdbeAddOp2(v, OP_Rewind, tab1, iBreak); VdbeCoverage(v);
r1 = sqlite3GetTempReg(pParse);
- iStart = sqlite3VdbeAddOp2(v, OP_RowKey, tab1, r1);
+ iStart = sqlite3VdbeAddOp2(v, OP_RowData, tab1, r1);
sqlite3VdbeAddOp4Int(v, OP_NotFound, tab2, iCont, r1, 0); VdbeCoverage(v);
sqlite3ReleaseTempReg(pParse, r1);
selectInnerLoop(pParse, p, p->pEList, tab1,
@@ -117014,7 +118457,8 @@ static int generateOutputSubroutine(
sqlite3VdbeAddOp4(v, OP_MakeRecord, pIn->iSdst, pIn->nSdst,
r1, pDest->zAffSdst, pIn->nSdst);
sqlite3ExprCacheAffinityChange(pParse, pIn->iSdst, pIn->nSdst);
- sqlite3VdbeAddOp2(v, OP_IdxInsert, pDest->iSDParm, r1);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, pDest->iSDParm, r1,
+ pIn->iSdst, pIn->nSdst);
sqlite3ReleaseTempReg(pParse, r1);
break;
}
@@ -117473,8 +118917,8 @@ static int multiSelectOrderBy(
#if !defined(SQLITE_OMIT_SUBQUERY) || !defined(SQLITE_OMIT_VIEW)
/* Forward Declarations */
-static void substExprList(sqlite3*, ExprList*, int, ExprList*);
-static void substSelect(sqlite3*, Select *, int, ExprList*, int);
+static void substExprList(Parse*, ExprList*, int, ExprList*);
+static void substSelect(Parse*, Select *, int, ExprList*, int);
/*
** Scan through the expression pExpr. Replace every reference to
@@ -117490,36 +118934,46 @@ static void substSelect(sqlite3*, Select *, int, ExprList*, int);
** of the subquery rather the result set of the subquery.
*/
static Expr *substExpr(
- sqlite3 *db, /* Report malloc errors to this connection */
+ Parse *pParse, /* Report errors here */
Expr *pExpr, /* Expr in which substitution occurs */
int iTable, /* Table to be substituted */
ExprList *pEList /* Substitute expressions */
){
+ sqlite3 *db = pParse->db;
if( pExpr==0 ) return 0;
if( pExpr->op==TK_COLUMN && pExpr->iTable==iTable ){
if( pExpr->iColumn<0 ){
pExpr->op = TK_NULL;
}else{
Expr *pNew;
+ Expr *pCopy = pEList->a[pExpr->iColumn].pExpr;
assert( pEList!=0 && pExpr->iColumn<pEList->nExpr );
assert( pExpr->pLeft==0 && pExpr->pRight==0 );
- pNew = sqlite3ExprDup(db, pEList->a[pExpr->iColumn].pExpr, 0);
- sqlite3ExprDelete(db, pExpr);
- pExpr = pNew;
+ if( sqlite3ExprIsVector(pCopy) ){
+ sqlite3VectorErrorMsg(pParse, pCopy);
+ }else{
+ pNew = sqlite3ExprDup(db, pCopy, 0);
+ if( pNew && (pExpr->flags & EP_FromJoin) ){
+ pNew->iRightJoinTable = pExpr->iRightJoinTable;
+ pNew->flags |= EP_FromJoin;
+ }
+ sqlite3ExprDelete(db, pExpr);
+ pExpr = pNew;
+ }
}
}else{
- pExpr->pLeft = substExpr(db, pExpr->pLeft, iTable, pEList);
- pExpr->pRight = substExpr(db, pExpr->pRight, iTable, pEList);
+ pExpr->pLeft = substExpr(pParse, pExpr->pLeft, iTable, pEList);
+ pExpr->pRight = substExpr(pParse, pExpr->pRight, iTable, pEList);
if( ExprHasProperty(pExpr, EP_xIsSelect) ){
- substSelect(db, pExpr->x.pSelect, iTable, pEList, 1);
+ substSelect(pParse, pExpr->x.pSelect, iTable, pEList, 1);
}else{
- substExprList(db, pExpr->x.pList, iTable, pEList);
+ substExprList(pParse, pExpr->x.pList, iTable, pEList);
}
}
return pExpr;
}
static void substExprList(
- sqlite3 *db, /* Report malloc errors here */
+ Parse *pParse, /* Report errors here */
ExprList *pList, /* List to scan and in which to make substitutes */
int iTable, /* Table to be substituted */
ExprList *pEList /* Substitute values */
@@ -117527,11 +118981,11 @@ static void substExprList(
int i;
if( pList==0 ) return;
for(i=0; i<pList->nExpr; i++){
- pList->a[i].pExpr = substExpr(db, pList->a[i].pExpr, iTable, pEList);
+ pList->a[i].pExpr = substExpr(pParse, pList->a[i].pExpr, iTable, pEList);
}
}
static void substSelect(
- sqlite3 *db, /* Report malloc errors here */
+ Parse *pParse, /* Report errors here */
Select *p, /* SELECT statement in which to make substitutions */
int iTable, /* Table to be replaced */
ExprList *pEList, /* Substitute values */
@@ -117542,17 +118996,17 @@ static void substSelect(
int i;
if( !p ) return;
do{
- substExprList(db, p->pEList, iTable, pEList);
- substExprList(db, p->pGroupBy, iTable, pEList);
- substExprList(db, p->pOrderBy, iTable, pEList);
- p->pHaving = substExpr(db, p->pHaving, iTable, pEList);
- p->pWhere = substExpr(db, p->pWhere, iTable, pEList);
+ substExprList(pParse, p->pEList, iTable, pEList);
+ substExprList(pParse, p->pGroupBy, iTable, pEList);
+ substExprList(pParse, p->pOrderBy, iTable, pEList);
+ p->pHaving = substExpr(pParse, p->pHaving, iTable, pEList);
+ p->pWhere = substExpr(pParse, p->pWhere, iTable, pEList);
pSrc = p->pSrc;
assert( pSrc!=0 );
for(i=pSrc->nSrc, pItem=pSrc->a; i>0; i--, pItem++){
- substSelect(db, pItem->pSelect, iTable, pEList, 1);
+ substSelect(pParse, pItem->pSelect, iTable, pEList, 1);
if( pItem->fg.isTabFunc ){
- substExprList(db, pItem->u1.pFuncArg, iTable, pEList);
+ substExprList(pParse, pItem->u1.pFuncArg, iTable, pEList);
}
}
}while( doPrior && (p = p->pPrior)!=0 );
@@ -117949,12 +119403,12 @@ static int flattenSubquery(
*/
if( ALWAYS(pSubitem->pTab!=0) ){
Table *pTabToDel = pSubitem->pTab;
- if( pTabToDel->nRef==1 ){
+ if( pTabToDel->nTabRef==1 ){
Parse *pToplevel = sqlite3ParseToplevel(pParse);
pTabToDel->pNextZombie = pToplevel->pZombieTab;
pToplevel->pZombieTab = pTabToDel;
}else{
- pTabToDel->nRef--;
+ pTabToDel->nTabRef--;
}
pSubitem->pTab = 0;
}
@@ -118077,7 +119531,7 @@ static int flattenSubquery(
}else{
pParent->pWhere = sqlite3ExprAnd(db, pWhere, pParent->pWhere);
}
- substSelect(db, pParent, iParent, pSub->pEList, 0);
+ substSelect(pParse, pParent, iParent, pSub->pEList, 0);
/* The flattened query is distinct if either the inner or the
** outer query is distinct.
@@ -118151,7 +119605,7 @@ static int flattenSubquery(
** terms are duplicated into the subquery.
*/
static int pushDownWhereTerms(
- sqlite3 *db, /* The database connection (for malloc()) */
+ Parse *pParse, /* Parse context (for malloc() and error reporting) */
Select *pSubq, /* The subquery whose WHERE clause is to be augmented */
Expr *pWhere, /* The WHERE clause of the outer query */
int iCursor /* Cursor number of the subquery */
@@ -118172,16 +119626,16 @@ static int pushDownWhereTerms(
return 0; /* restriction (3) */
}
while( pWhere->op==TK_AND ){
- nChng += pushDownWhereTerms(db, pSubq, pWhere->pRight, iCursor);
+ nChng += pushDownWhereTerms(pParse, pSubq, pWhere->pRight, iCursor);
pWhere = pWhere->pLeft;
}
if( ExprHasProperty(pWhere,EP_FromJoin) ) return 0; /* restriction 5 */
if( sqlite3ExprIsTableConstant(pWhere, iCursor) ){
nChng++;
while( pSubq ){
- pNew = sqlite3ExprDup(db, pWhere, 0);
- pNew = substExpr(db, pNew, iCursor, pSubq->pEList);
- pSubq->pWhere = sqlite3ExprAnd(db, pSubq->pWhere, pNew);
+ pNew = sqlite3ExprDup(pParse->db, pWhere, 0);
+ pNew = substExpr(pParse, pNew, iCursor, pSubq->pEList);
+ pSubq->pWhere = sqlite3ExprAnd(pParse->db, pSubq->pWhere, pNew);
pSubq = pSubq->pPrior;
}
}
@@ -118473,7 +119927,7 @@ static int withExpand(
assert( pFrom->pTab==0 );
pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table));
if( pTab==0 ) return WRC_Abort;
- pTab->nRef = 1;
+ pTab->nTabRef = 1;
pTab->zName = sqlite3DbStrDup(db, pCte->zName);
pTab->iPKey = -1;
pTab->nRowLogEst = 200; assert( 200==sqlite3LogEst(1048576) );
@@ -118496,25 +119950,33 @@ static int withExpand(
){
pItem->pTab = pTab;
pItem->fg.isRecursive = 1;
- pTab->nRef++;
+ pTab->nTabRef++;
pSel->selFlags |= SF_Recursive;
}
}
}
/* Only one recursive reference is permitted. */
- if( pTab->nRef>2 ){
+ if( pTab->nTabRef>2 ){
sqlite3ErrorMsg(
pParse, "multiple references to recursive table: %s", pCte->zName
);
return SQLITE_ERROR;
}
- assert( pTab->nRef==1 || ((pSel->selFlags&SF_Recursive) && pTab->nRef==2 ));
+ assert( pTab->nTabRef==1 || ((pSel->selFlags&SF_Recursive) && pTab->nTabRef==2 ));
pCte->zCteErr = "circular reference: %s";
pSavedWith = pParse->pWith;
pParse->pWith = pWith;
- sqlite3WalkSelect(pWalker, bMayRecursive ? pSel->pPrior : pSel);
+ if( bMayRecursive ){
+ Select *pPrior = pSel->pPrior;
+ assert( pPrior->pWith==0 );
+ pPrior->pWith = pSel->pWith;
+ sqlite3WalkSelect(pWalker, pPrior);
+ pPrior->pWith = 0;
+ }else{
+ sqlite3WalkSelect(pWalker, pSel);
+ }
pParse->pWith = pWith;
for(pLeft=pSel; pLeft->pPrior; pLeft=pLeft->pPrior);
@@ -118558,10 +120020,12 @@ static int withExpand(
*/
static void selectPopWith(Walker *pWalker, Select *p){
Parse *pParse = pWalker->pParse;
- With *pWith = findRightmost(p)->pWith;
- if( pWith!=0 ){
- assert( pParse->pWith==pWith );
- pParse->pWith = pWith->pOuter;
+ if( pParse->pWith && p->pPrior==0 ){
+ With *pWith = findRightmost(p)->pWith;
+ if( pWith!=0 ){
+ assert( pParse->pWith==pWith );
+ pParse->pWith = pWith->pOuter;
+ }
}
}
#else
@@ -118611,8 +120075,8 @@ static int selectExpander(Walker *pWalker, Select *p){
}
pTabList = p->pSrc;
pEList = p->pEList;
- if( pWalker->xSelectCallback2==selectPopWith ){
- sqlite3WithPush(pParse, findRightmost(p)->pWith, 0);
+ if( p->pWith ){
+ sqlite3WithPush(pParse, p->pWith, 0);
}
/* Make sure cursor numbers have been assigned to all entries in
@@ -118642,7 +120106,7 @@ static int selectExpander(Walker *pWalker, Select *p){
if( sqlite3WalkSelect(pWalker, pSel) ) return WRC_Abort;
pFrom->pTab = pTab = sqlite3DbMallocZero(db, sizeof(Table));
if( pTab==0 ) return WRC_Abort;
- pTab->nRef = 1;
+ pTab->nTabRef = 1;
pTab->zName = sqlite3MPrintf(db, "sqlite_sq_%p", (void*)pTab);
while( pSel->pPrior ){ pSel = pSel->pPrior; }
sqlite3ColumnsFromExprList(pParse, pSel->pEList,&pTab->nCol,&pTab->aCol);
@@ -118655,13 +120119,13 @@ static int selectExpander(Walker *pWalker, Select *p){
assert( pFrom->pTab==0 );
pFrom->pTab = pTab = sqlite3LocateTableItem(pParse, 0, pFrom);
if( pTab==0 ) return WRC_Abort;
- if( pTab->nRef==0xffff ){
+ if( pTab->nTabRef>=0xffff ){
sqlite3ErrorMsg(pParse, "too many references to \"%s\": max 65535",
pTab->zName);
pFrom->pTab = 0;
return WRC_Abort;
}
- pTab->nRef++;
+ pTab->nTabRef++;
if( !IsVirtual(pTab) && cannotBeFunction(pParse, pFrom) ){
return WRC_Abort;
}
@@ -118811,10 +120275,10 @@ static int selectExpander(Walker *pWalker, Select *p){
if( longNames || pTabList->nSrc>1 ){
Expr *pLeft;
pLeft = sqlite3Expr(db, TK_ID, zTabName);
- pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0);
+ pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight);
if( zSchemaName ){
pLeft = sqlite3Expr(db, TK_ID, zSchemaName);
- pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pExpr, 0);
+ pExpr = sqlite3PExpr(pParse, TK_DOT, pLeft, pExpr);
}
if( longNames ){
zColname = sqlite3MPrintf(db, "%s.%s", zTabName, zName);
@@ -118899,9 +120363,7 @@ static void sqlite3SelectExpand(Parse *pParse, Select *pSelect){
sqlite3WalkSelect(&w, pSelect);
}
w.xSelectCallback = selectExpander;
- if( (pSelect->selFlags & SF_MultiValue)==0 ){
- w.xSelectCallback2 = selectPopWith;
- }
+ w.xSelectCallback2 = selectPopWith;
sqlite3WalkSelect(&w, pSelect);
}
@@ -119051,8 +120513,8 @@ static void finalizeAggFunctions(Parse *pParse, AggInfo *pAggInfo){
for(i=0, pF=pAggInfo->aFunc; i<pAggInfo->nFunc; i++, pF++){
ExprList *pList = pF->pExpr->x.pList;
assert( !ExprHasProperty(pF->pExpr, EP_xIsSelect) );
- sqlite3VdbeAddOp4(v, OP_AggFinal, pF->iMem, pList ? pList->nExpr : 0, 0,
- (void*)pF->pFunc, P4_FUNCDEF);
+ sqlite3VdbeAddOp2(v, OP_AggFinal, pF->iMem, pList ? pList->nExpr : 0);
+ sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
}
}
@@ -119103,8 +120565,8 @@ static void updateAccumulator(Parse *pParse, AggInfo *pAggInfo){
if( regHit==0 && pAggInfo->nAccumulator ) regHit = ++pParse->nMem;
sqlite3VdbeAddOp4(v, OP_CollSeq, regHit, 0, 0, (char *)pColl, P4_COLLSEQ);
}
- sqlite3VdbeAddOp4(v, OP_AggStep0, 0, regAgg, pF->iMem,
- (void*)pF->pFunc, P4_FUNCDEF);
+ sqlite3VdbeAddOp3(v, OP_AggStep0, 0, regAgg, pF->iMem);
+ sqlite3VdbeAppendP4(v, pF->pFunc, P4_FUNCDEF);
sqlite3VdbeChangeP5(v, (u8)nArg);
sqlite3ExprCacheAffinityChange(pParse, regAgg, nArg);
sqlite3ReleaseTempRange(pParse, regAgg, nArg);
@@ -119338,7 +120800,7 @@ SQLITE_PRIVATE int sqlite3Select(
** inside the subquery. This can help the subquery to run more efficiently.
*/
if( (pItem->fg.jointype & JT_OUTER)==0
- && pushDownWhereTerms(db, pSub, p->pWhere, pItem->iCursor)
+ && pushDownWhereTerms(pParse, pSub, p->pWhere, pItem->iCursor)
){
#if SELECTTRACE_ENABLED
if( sqlite3SelectTrace & 0x100 ){
@@ -119500,7 +120962,9 @@ SQLITE_PRIVATE int sqlite3Select(
/* Set the limiter.
*/
iEnd = sqlite3VdbeMakeLabel(v);
- p->nSelectRow = 320; /* 4 billion rows */
+ if( (p->selFlags & SF_FixedLimit)==0 ){
+ p->nSelectRow = 320; /* 4 billion rows */
+ }
computeLimitRegisters(pParse, p, iEnd);
if( p->iLimit==0 && sSort.addrSortIndex>=0 ){
sqlite3VdbeChangeOpcode(v, sSort.addrSortIndex, OP_SorterOpen);
@@ -119978,7 +121442,7 @@ SQLITE_PRIVATE int sqlite3Select(
** of output.
*/
resetAccumulator(pParse, &sAggInfo);
- pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMax,0,flag,0);
+ pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, pMinMax, 0,flag,0);
if( pWInfo==0 ){
sqlite3ExprListDelete(db, pDel);
goto select_end;
@@ -120067,8 +121531,6 @@ select_end:
** if they are not used.
*/
/* #include "sqliteInt.h" */
-/* #include <stdlib.h> */
-/* #include <string.h> */
#ifndef SQLITE_OMIT_GET_TABLE
@@ -120561,7 +122023,7 @@ SQLITE_PRIVATE void sqlite3FinishTrigger(
z = sqlite3DbStrNDup(db, (char*)pAll->z, pAll->n);
sqlite3NestedParse(pParse,
"INSERT INTO %Q.%s VALUES('trigger',%Q,%Q,0,'CREATE TRIGGER %q')",
- db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), zName,
+ db->aDb[iDb].zDbSName, MASTER_NAME, zName,
pTrig->table, z);
sqlite3DbFree(db, z);
sqlite3ChangeCookie(pParse, iDb);
@@ -120812,7 +122274,7 @@ SQLITE_PRIVATE void sqlite3DropTriggerPtr(Parse *pParse, Trigger *pTrigger){
if( (v = sqlite3GetVdbe(pParse))!=0 ){
sqlite3NestedParse(pParse,
"DELETE FROM %Q.%s WHERE name=%Q AND type='trigger'",
- db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb), pTrigger->zName
+ db->aDb[iDb].zDbSName, MASTER_NAME, pTrigger->zName
);
sqlite3ChangeCookie(pParse, iDb);
sqlite3VdbeAddOp4(v, OP_DropTrigger, iDb, 0, 0, pTrigger->zName, 0);
@@ -121424,14 +122886,14 @@ SQLITE_PRIVATE void sqlite3ColumnDefault(Vdbe *v, Table *pTab, int i, int iReg){
sqlite3ValueFromExpr(sqlite3VdbeDb(v), pCol->pDflt, enc,
pCol->affinity, &pValue);
if( pValue ){
- sqlite3VdbeChangeP4(v, -1, (const char *)pValue, P4_MEM);
+ sqlite3VdbeAppendP4(v, pValue, P4_MEM);
}
+ }
#ifndef SQLITE_OMIT_FLOATING_POINT
- if( pTab->aCol[i].affinity==SQLITE_AFF_REAL ){
- sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg);
- }
-#endif
+ if( pTab->aCol[i].affinity==SQLITE_AFF_REAL ){
+ sqlite3VdbeAddOp1(v, OP_RealAffinity, iReg);
}
+#endif
}
/*
@@ -121460,7 +122922,7 @@ SQLITE_PRIVATE void sqlite3Update(
int iDataCur; /* Cursor for the canonical data btree */
int iIdxCur; /* Cursor for the first index */
sqlite3 *db; /* The database structure */
- int *aRegIdx = 0; /* One register assigned to each index to be updated */
+ int *aRegIdx = 0; /* First register in array assigned to each index */
int *aXRef = 0; /* aXRef[i] is the index in pChanges->a[] of the
** an expression for the i-th column of the table.
** aXRef[i]==-1 if the i-th column is not changed. */
@@ -121472,10 +122934,11 @@ SQLITE_PRIVATE void sqlite3Update(
AuthContext sContext; /* The authorization context */
NameContext sNC; /* The name-context to resolve expressions in */
int iDb; /* Database containing the table being updated */
- int okOnePass; /* True for one-pass algorithm without the FIFO */
+ int eOnePass; /* ONEPASS_XXX value from where.c */
int hasFK; /* True if foreign key processing is required */
int labelBreak; /* Jump here to break out of UPDATE loop */
int labelContinue; /* Jump here to continue next step of UPDATE loop */
+ int flags; /* Flags for sqlite3WhereBegin() */
#ifndef SQLITE_OMIT_TRIGGER
int isView; /* True when updating a view (INSTEAD OF trigger) */
@@ -121486,6 +122949,10 @@ SQLITE_PRIVATE void sqlite3Update(
int iEph = 0; /* Ephemeral table holding all primary key values */
int nKey = 0; /* Number of elements in regKey for WITHOUT ROWID */
int aiCurOnePass[2]; /* The write cursors opened by WHERE_ONEPASS */
+ int addrOpen = 0; /* Address of OP_OpenEphemeral */
+ int iPk = 0; /* First of nPk cells holding PRIMARY KEY value */
+ i16 nPk = 0; /* Number of components of the PRIMARY KEY */
+ int bReplace = 0; /* True if REPLACE conflict resolution might happen */
/* Register Allocations */
int regRowCount = 0; /* A count of rows changed */
@@ -121637,12 +123104,19 @@ SQLITE_PRIVATE void sqlite3Update(
int reg;
if( chngKey || hasFK || pIdx->pPartIdxWhere || pIdx==pPk ){
reg = ++pParse->nMem;
+ pParse->nMem += pIdx->nColumn;
}else{
reg = 0;
for(i=0; i<pIdx->nKeyCol; i++){
i16 iIdxCol = pIdx->aiColumn[i];
if( iIdxCol<0 || aXRef[iIdxCol]>=0 ){
reg = ++pParse->nMem;
+ pParse->nMem += pIdx->nColumn;
+ if( (onError==OE_Replace)
+ || (onError==OE_Default && pIdx->onError==OE_Replace)
+ ){
+ bReplace = 1;
+ }
break;
}
}
@@ -121650,6 +123124,11 @@ SQLITE_PRIVATE void sqlite3Update(
if( reg==0 ) aToOpen[j+1] = 0;
aRegIdx[j] = reg;
}
+ if( bReplace ){
+ /* If REPLACE conflict resolution might be invoked, open cursors on all
+ ** indexes in case they are needed to delete records. */
+ memset(aToOpen, 1, nIdx+1);
+ }
/* Begin generating code. */
v = sqlite3GetVdbe(pParse);
@@ -121702,110 +123181,130 @@ SQLITE_PRIVATE void sqlite3Update(
}
#endif
- /* Begin the database scan
- */
+ /* Initialize the count of updated rows */
+ if( (db->flags & SQLITE_CountRows) && !pParse->pTriggerTab ){
+ regRowCount = ++pParse->nMem;
+ sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount);
+ }
+
if( HasRowid(pTab) ){
sqlite3VdbeAddOp3(v, OP_Null, 0, regRowSet, regOldRowid);
- pWInfo = sqlite3WhereBegin(
- pParse, pTabList, pWhere, 0, 0,
- WHERE_ONEPASS_DESIRED | WHERE_SEEK_TABLE, iIdxCur
- );
- if( pWInfo==0 ) goto update_cleanup;
- okOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass);
-
- /* Remember the rowid of every item to be updated.
- */
- sqlite3VdbeAddOp2(v, OP_Rowid, iDataCur, regOldRowid);
- if( !okOnePass ){
- sqlite3VdbeAddOp2(v, OP_RowSetAdd, regRowSet, regOldRowid);
- }
-
- /* End the database scan loop.
- */
- sqlite3WhereEnd(pWInfo);
}else{
- int iPk; /* First of nPk memory cells holding PRIMARY KEY value */
- i16 nPk; /* Number of components of the PRIMARY KEY */
- int addrOpen; /* Address of the OpenEphemeral instruction */
-
assert( pPk!=0 );
nPk = pPk->nKeyCol;
iPk = pParse->nMem+1;
pParse->nMem += nPk;
regKey = ++pParse->nMem;
iEph = pParse->nTab++;
+
sqlite3VdbeAddOp2(v, OP_Null, 0, iPk);
addrOpen = sqlite3VdbeAddOp2(v, OP_OpenEphemeral, iEph, nPk);
sqlite3VdbeSetP4KeyInfo(pParse, pPk);
- pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0,
- WHERE_ONEPASS_DESIRED, iIdxCur);
- if( pWInfo==0 ) goto update_cleanup;
- okOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass);
+ }
+
+ /* Begin the database scan.
+ **
+ ** Do not consider a single-pass strategy for a multi-row update if
+ ** there are any triggers or foreign keys to process, or rows may
+ ** be deleted as a result of REPLACE conflict handling. Any of these
+ ** things might disturb a cursor being used to scan through the table
+ ** or index, causing a single-pass approach to malfunction. */
+ flags = WHERE_ONEPASS_DESIRED|WHERE_SEEK_UNIQ_TABLE;
+ if( !pParse->nested && !pTrigger && !hasFK && !chngKey && !bReplace ){
+ flags |= WHERE_ONEPASS_MULTIROW;
+ }
+ pWInfo = sqlite3WhereBegin(pParse, pTabList, pWhere, 0, 0, flags, iIdxCur);
+ if( pWInfo==0 ) goto update_cleanup;
+
+ /* A one-pass strategy that might update more than one row may not
+ ** be used if any column of the index used for the scan is being
+ ** updated. Otherwise, if there is an index on "b", statements like
+ ** the following could create an infinite loop:
+ **
+ ** UPDATE t1 SET b=b+1 WHERE b>?
+ **
+ ** Fall back to ONEPASS_OFF if where.c has selected a ONEPASS_MULTI
+ ** strategy that uses an index for which one or more columns are being
+ ** updated. */
+ eOnePass = sqlite3WhereOkOnePass(pWInfo, aiCurOnePass);
+ if( eOnePass==ONEPASS_MULTI ){
+ int iCur = aiCurOnePass[1];
+ if( iCur>=0 && iCur!=iDataCur && aToOpen[iCur-iBaseCur] ){
+ eOnePass = ONEPASS_OFF;
+ }
+ assert( iCur!=iDataCur || !HasRowid(pTab) );
+ }
+
+ if( HasRowid(pTab) ){
+ /* Read the rowid of the current row of the WHERE scan. In ONEPASS_OFF
+ ** mode, write the rowid into the FIFO. In either of the one-pass modes,
+ ** leave it in register regOldRowid. */
+ sqlite3VdbeAddOp2(v, OP_Rowid, iDataCur, regOldRowid);
+ if( eOnePass==ONEPASS_OFF ){
+ sqlite3VdbeAddOp2(v, OP_RowSetAdd, regRowSet, regOldRowid);
+ }
+ }else{
+ /* Read the PK of the current row into an array of registers. In
+ ** ONEPASS_OFF mode, serialize the array into a record and store it in
+ ** the ephemeral table. Or, in ONEPASS_SINGLE or MULTI mode, change
+ ** the OP_OpenEphemeral instruction to a Noop (the ephemeral table
+ ** is not required) and leave the PK fields in the array of registers. */
for(i=0; i<nPk; i++){
assert( pPk->aiColumn[i]>=0 );
- sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur, pPk->aiColumn[i],
- iPk+i);
+ sqlite3ExprCodeGetColumnOfTable(v, pTab, iDataCur,pPk->aiColumn[i],iPk+i);
}
- if( okOnePass ){
+ if( eOnePass ){
sqlite3VdbeChangeToNoop(v, addrOpen);
nKey = nPk;
regKey = iPk;
}else{
sqlite3VdbeAddOp4(v, OP_MakeRecord, iPk, nPk, regKey,
sqlite3IndexAffinityStr(db, pPk), nPk);
- sqlite3VdbeAddOp2(v, OP_IdxInsert, iEph, regKey);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, iEph, regKey, iPk, nPk);
}
- sqlite3WhereEnd(pWInfo);
}
- /* Initialize the count of updated rows
- */
- if( (db->flags & SQLITE_CountRows) && !pParse->pTriggerTab ){
- regRowCount = ++pParse->nMem;
- sqlite3VdbeAddOp2(v, OP_Integer, 0, regRowCount);
+ if( eOnePass!=ONEPASS_MULTI ){
+ sqlite3WhereEnd(pWInfo);
}
labelBreak = sqlite3VdbeMakeLabel(v);
if( !isView ){
- /*
- ** Open every index that needs updating. Note that if any
- ** index could potentially invoke a REPLACE conflict resolution
- ** action, then we need to open all indices because we might need
- ** to be deleting some records.
- */
- if( onError==OE_Replace ){
- memset(aToOpen, 1, nIdx+1);
- }else{
- for(pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext){
- if( pIdx->onError==OE_Replace ){
- memset(aToOpen, 1, nIdx+1);
- break;
- }
- }
- }
- if( okOnePass ){
+ int addrOnce = 0;
+
+ /* Open every index that needs updating. */
+ if( eOnePass!=ONEPASS_OFF ){
if( aiCurOnePass[0]>=0 ) aToOpen[aiCurOnePass[0]-iBaseCur] = 0;
if( aiCurOnePass[1]>=0 ) aToOpen[aiCurOnePass[1]-iBaseCur] = 0;
}
+
+ if( eOnePass==ONEPASS_MULTI && (nIdx-(aiCurOnePass[1]>=0))>0 ){
+ addrOnce = sqlite3VdbeAddOp0(v, OP_Once); VdbeCoverage(v);
+ }
sqlite3OpenTableAndIndices(pParse, pTab, OP_OpenWrite, 0, iBaseCur, aToOpen,
0, 0);
+ if( addrOnce ) sqlite3VdbeJumpHere(v, addrOnce);
}
/* Top of the update loop */
- if( okOnePass ){
- if( aToOpen[iDataCur-iBaseCur] && !isView ){
+ if( eOnePass!=ONEPASS_OFF ){
+ if( !isView && aiCurOnePass[0]!=iDataCur && aiCurOnePass[1]!=iDataCur ){
assert( pPk );
sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelBreak, regKey, nKey);
VdbeCoverageNeverTaken(v);
}
- labelContinue = labelBreak;
+ if( eOnePass==ONEPASS_SINGLE ){
+ labelContinue = labelBreak;
+ }else{
+ labelContinue = sqlite3VdbeMakeLabel(v);
+ }
sqlite3VdbeAddOp2(v, OP_IsNull, pPk ? regKey : regOldRowid, labelBreak);
VdbeCoverageIf(v, pPk==0);
VdbeCoverageIf(v, pPk!=0);
}else if( pPk ){
labelContinue = sqlite3VdbeMakeLabel(v);
sqlite3VdbeAddOp2(v, OP_Rewind, iEph, labelBreak); VdbeCoverage(v);
- addrTop = sqlite3VdbeAddOp2(v, OP_RowKey, iEph, regKey);
+ addrTop = sqlite3VdbeAddOp2(v, OP_RowData, iEph, regKey);
sqlite3VdbeAddOp4Int(v, OP_NotFound, iDataCur, labelContinue, regKey, 0);
VdbeCoverage(v);
}else{
@@ -121923,7 +123422,6 @@ SQLITE_PRIVATE void sqlite3Update(
if( !isView ){
int addr1 = 0; /* Address of jump instruction */
- int bReplace = 0; /* True if REPLACE conflict resolution might happen */
/* Do constraint checks. */
assert( regOldRowid>0 );
@@ -121959,14 +123457,18 @@ SQLITE_PRIVATE void sqlite3Update(
assert( regNew==regNewRowid+1 );
#ifdef SQLITE_ENABLE_PREUPDATE_HOOK
sqlite3VdbeAddOp3(v, OP_Delete, iDataCur,
- OPFLAG_ISUPDATE | ((hasFK || chngKey || pPk!=0) ? 0 : OPFLAG_ISNOOP),
+ OPFLAG_ISUPDATE | ((hasFK || chngKey) ? 0 : OPFLAG_ISNOOP),
regNewRowid
);
+ if( eOnePass==ONEPASS_MULTI ){
+ assert( hasFK==0 && chngKey==0 );
+ sqlite3VdbeChangeP5(v, OPFLAG_SAVEPOSITION);
+ }
if( !pParse->nested ){
- sqlite3VdbeChangeP4(v, -1, (char*)pTab, P4_TABLE);
+ sqlite3VdbeAppendP4(v, pTab, P4_TABLE);
}
#else
- if( hasFK || chngKey || pPk!=0 ){
+ if( hasFK || chngKey ){
sqlite3VdbeAddOp2(v, OP_Delete, iDataCur, 0);
}
#endif
@@ -121979,8 +123481,11 @@ SQLITE_PRIVATE void sqlite3Update(
}
/* Insert the new index entries and the new record. */
- sqlite3CompleteInsertion(pParse, pTab, iDataCur, iIdxCur,
- regNewRowid, aRegIdx, 1, 0, 0);
+ sqlite3CompleteInsertion(
+ pParse, pTab, iDataCur, iIdxCur, regNewRowid, aRegIdx,
+ OPFLAG_ISUPDATE | (eOnePass==ONEPASS_MULTI ? OPFLAG_SAVEPOSITION : 0),
+ 0, 0
+ );
/* Do any ON CASCADE, SET NULL or SET DEFAULT operations required to
** handle rows (possibly in other tables) that refer via a foreign key
@@ -122002,8 +123507,11 @@ SQLITE_PRIVATE void sqlite3Update(
/* Repeat the above with the next record to be updated, until
  ** all records selected by the WHERE clause have been updated.
*/
- if( okOnePass ){
+ if( eOnePass==ONEPASS_SINGLE ){
/* Nothing to do at end-of-loop for a single-pass */
+ }else if( eOnePass==ONEPASS_MULTI ){
+ sqlite3VdbeResolveLabel(v, labelContinue);
+ sqlite3WhereEnd(pWInfo);
}else if( pPk ){
sqlite3VdbeResolveLabel(v, labelContinue);
sqlite3VdbeAddOp2(v, OP_Next, iEph, addrTop); VdbeCoverage(v);
@@ -122012,15 +123520,6 @@ SQLITE_PRIVATE void sqlite3Update(
}
sqlite3VdbeResolveLabel(v, labelBreak);
- /* Close all tables */
- for(i=0, pIdx=pTab->pIndex; pIdx; pIdx=pIdx->pNext, i++){
- assert( aRegIdx );
- if( aToOpen[i+1] ){
- sqlite3VdbeAddOp2(v, OP_Close, iIdxCur+i, 0);
- }
- }
- if( iDataCur<iIdxCur ) sqlite3VdbeAddOp2(v, OP_Close, iDataCur, 0);
-
/* Update the sqlite_sequence table by storing the content of the
** maximum rowid counter values recorded while inserting into
** autoincrement tables.
@@ -122571,6 +124070,41 @@ struct VtabCtx {
};
/*
+** Construct and install a Module object for a virtual table. When this
+** routine is called, it is guaranteed that all appropriate locks are held
+** and the module is not already part of the connection.
+*/
+SQLITE_PRIVATE Module *sqlite3VtabCreateModule(
+ sqlite3 *db, /* Database in which module is registered */
+ const char *zName, /* Name assigned to this module */
+ const sqlite3_module *pModule, /* The definition of the module */
+ void *pAux, /* Context pointer for xCreate/xConnect */
+ void (*xDestroy)(void *) /* Module destructor function */
+){
+ Module *pMod;
+ int nName = sqlite3Strlen30(zName);
+ pMod = (Module *)sqlite3DbMallocRawNN(db, sizeof(Module) + nName + 1);
+ if( pMod ){
+ Module *pDel;
+ char *zCopy = (char *)(&pMod[1]);
+ memcpy(zCopy, zName, nName+1);
+ pMod->zName = zCopy;
+ pMod->pModule = pModule;
+ pMod->pAux = pAux;
+ pMod->xDestroy = xDestroy;
+ pMod->pEpoTab = 0;
+ pDel = (Module *)sqlite3HashInsert(&db->aModule,zCopy,(void*)pMod);
+ assert( pDel==0 || pDel==pMod );
+ if( pDel ){
+ sqlite3OomFault(db);
+ sqlite3DbFree(db, pDel);
+ pMod = 0;
+ }
+ }
+ return pMod;
+}
+
+/*
** The actual function that does the work of creating a new module.
** This function implements the sqlite3_create_module() and
** sqlite3_create_module_v2() interfaces.
@@ -122583,35 +124117,15 @@ static int createModule(
void (*xDestroy)(void *) /* Module destructor function */
){
int rc = SQLITE_OK;
- int nName;
sqlite3_mutex_enter(db->mutex);
- nName = sqlite3Strlen30(zName);
if( sqlite3HashFind(&db->aModule, zName) ){
rc = SQLITE_MISUSE_BKPT;
}else{
- Module *pMod;
- pMod = (Module *)sqlite3DbMallocRawNN(db, sizeof(Module) + nName + 1);
- if( pMod ){
- Module *pDel;
- char *zCopy = (char *)(&pMod[1]);
- memcpy(zCopy, zName, nName+1);
- pMod->zName = zCopy;
- pMod->pModule = pModule;
- pMod->pAux = pAux;
- pMod->xDestroy = xDestroy;
- pMod->pEpoTab = 0;
- pDel = (Module *)sqlite3HashInsert(&db->aModule,zCopy,(void*)pMod);
- assert( pDel==0 || pDel==pMod );
- if( pDel ){
- sqlite3OomFault(db);
- sqlite3DbFree(db, pDel);
- }
- }
+ (void)sqlite3VtabCreateModule(db, zName, pModule, pAux, xDestroy);
}
rc = sqlite3ApiExit(db, rc);
if( rc!=SQLITE_OK && xDestroy ) xDestroy(pAux);
-
sqlite3_mutex_leave(db->mutex);
return rc;
}
@@ -122950,7 +124464,7 @@ SQLITE_PRIVATE void sqlite3VtabFinishParse(Parse *pParse, Token *pEnd){
"UPDATE %Q.%s "
"SET type='table', name=%Q, tbl_name=%Q, rootpage=0, sql=%Q "
"WHERE rowid=#%d",
- db->aDb[iDb].zDbSName, SCHEMA_TABLE(iDb),
+ db->aDb[iDb].zDbSName, MASTER_NAME,
pTab->zName,
pTab->zName,
zStmt,
@@ -123675,7 +125189,7 @@ SQLITE_PRIVATE int sqlite3VtabEponymousTableInit(Parse *pParse, Module *pMod){
return 0;
}
pMod->pEpoTab = pTab;
- pTab->nRef = 1;
+ pTab->nTabRef = 1;
pTab->pSchema = db->aDb[0].pSchema;
pTab->tabFlags |= TF_Virtual;
pTab->nModuleArg = 0;
@@ -124772,6 +126286,7 @@ static int codeEqualityTerm(
}else{
Select *pSelect = pX->x.pSelect;
sqlite3 *db = pParse->db;
+ u16 savedDbOptFlags = db->dbOptFlags;
ExprList *pOrigRhs = pSelect->pEList;
ExprList *pOrigLhs = pX->pLeft->x.pList;
ExprList *pRhs = 0; /* New Select.pEList for RHS */
@@ -124815,7 +126330,9 @@ static int codeEqualityTerm(
testcase( aiMap==0 );
}
pSelect->pEList = pRhs;
+ db->dbOptFlags |= SQLITE_QueryFlattener;
eType = sqlite3FindInIndex(pParse, pX, IN_INDEX_LOOP, 0, aiMap);
+ db->dbOptFlags = savedDbOptFlags;
testcase( aiMap!=0 && aiMap[0]!=0 );
pSelect->pEList = pOrigRhs;
pLeft->x.pList = pOrigLhs;
@@ -125465,7 +126982,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
sqlite3VdbeAddOp2(v, OP_Integer, nConstraint, iReg+1);
sqlite3VdbeAddOp4(v, OP_VFilter, iCur, addrNotFound, iReg,
pLoop->u.vtab.idxStr,
- pLoop->u.vtab.needFree ? P4_MPRINTF : P4_STATIC);
+ pLoop->u.vtab.needFree ? P4_DYNAMIC : P4_STATIC);
VdbeCoverage(v);
pLoop->u.vtab.needFree = 0;
pLevel->p1 = iCur;
@@ -125498,7 +127015,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
/* Generate code that will continue to the next row if
** the IN constraint is not satisfied */
- pCompare = sqlite3PExpr(pParse, TK_EQ, 0, 0, 0);
+ pCompare = sqlite3PExpr(pParse, TK_EQ, 0, 0);
assert( pCompare!=0 || db->mallocFailed );
if( pCompare ){
pCompare->pLeft = pTerm->pExpr->pLeft;
@@ -125914,7 +127431,10 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
if( omitTable ){
/* pIdx is a covering index. No need to access the main table. */
}else if( HasRowid(pIdx->pTable) ){
- if( (pWInfo->wctrlFlags & WHERE_SEEK_TABLE)!=0 ){
+ if( (pWInfo->wctrlFlags & WHERE_SEEK_TABLE) || (
+ (pWInfo->wctrlFlags & WHERE_SEEK_UNIQ_TABLE)
+ && (pWInfo->eOnePass==ONEPASS_SINGLE)
+ )){
iRowidReg = ++pParse->nMem;
sqlite3VdbeAddOp2(v, OP_IdxRowid, iIdxCur, iRowidReg);
sqlite3ExprCacheStore(pParse, iCur, -1, iRowidReg);
@@ -126097,7 +127617,7 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
pAndExpr = sqlite3ExprAnd(db, pAndExpr, pExpr);
}
if( pAndExpr ){
- pAndExpr = sqlite3PExpr(pParse, TK_AND|TKFLG_DONTFOLD, 0, pAndExpr, 0);
+ pAndExpr = sqlite3PExpr(pParse, TK_AND|TKFLG_DONTFOLD, 0, pAndExpr);
}
}
@@ -126170,7 +127690,8 @@ SQLITE_PRIVATE Bitmask sqlite3WhereCodeOneLoopStart(
}
if( iSet>=0 ){
sqlite3VdbeAddOp3(v, OP_MakeRecord, r, nPk, regRowid);
- sqlite3VdbeAddOp3(v, OP_IdxInsert, regRowset, regRowid, 0);
+ sqlite3VdbeAddOp4Int(v, OP_IdxInsert, regRowset, regRowid,
+ r, nPk);
if( iSet ) sqlite3VdbeChangeP5(v, OPFLAG_USESEEKRESULT);
}
@@ -127098,7 +128619,7 @@ static void exprAnalyzeOrTerm(
}
assert( pLeft!=0 );
pDup = sqlite3ExprDup(db, pLeft, 0);
- pNew = sqlite3PExpr(pParse, TK_IN, pDup, 0, 0);
+ pNew = sqlite3PExpr(pParse, TK_IN, pDup, 0);
if( pNew ){
int idxNew;
transferJoinMarkings(pNew, pExpr);
@@ -127277,6 +128798,7 @@ static void exprAnalyze(
Parse *pParse = pWInfo->pParse; /* Parsing context */
sqlite3 *db = pParse->db; /* Database connection */
unsigned char eOp2; /* op2 value for LIKE/REGEXP/GLOB */
+ int nLeft; /* Number of elements on left side vector */
if( db->mallocFailed ){
return;
@@ -127306,6 +128828,10 @@ static void exprAnalyze(
prereqAll |= x;
extraRight = x-1; /* ON clause terms may not be used with an index
** on left table of a LEFT JOIN. Ticket #3015 */
+ if( (prereqAll>>1)>=x ){
+ sqlite3ErrorMsg(pParse, "ON clause references tables to its right");
+ return;
+ }
}
pTerm->prereqAll = prereqAll;
pTerm->leftCursor = -1;
@@ -127396,7 +128922,7 @@ static void exprAnalyze(
int idxNew;
pNewExpr = sqlite3PExpr(pParse, ops[i],
sqlite3ExprDup(db, pExpr->pLeft, 0),
- sqlite3ExprDup(db, pList->a[i].pExpr, 0), 0);
+ sqlite3ExprDup(db, pList->a[i].pExpr, 0));
transferJoinMarkings(pNewExpr, pExpr);
idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC);
testcase( idxNew==0 );
@@ -127481,7 +129007,7 @@ static void exprAnalyze(
pNewExpr1 = sqlite3ExprDup(db, pLeft, 0);
pNewExpr1 = sqlite3PExpr(pParse, TK_GE,
sqlite3ExprAddCollateString(pParse,pNewExpr1,zCollSeqName),
- pStr1, 0);
+ pStr1);
transferJoinMarkings(pNewExpr1, pExpr);
idxNew1 = whereClauseInsert(pWC, pNewExpr1, wtFlags);
testcase( idxNew1==0 );
@@ -127489,7 +129015,7 @@ static void exprAnalyze(
pNewExpr2 = sqlite3ExprDup(db, pLeft, 0);
pNewExpr2 = sqlite3PExpr(pParse, TK_LT,
sqlite3ExprAddCollateString(pParse,pNewExpr2,zCollSeqName),
- pStr2, 0);
+ pStr2);
transferJoinMarkings(pNewExpr2, pExpr);
idxNew2 = whereClauseInsert(pWC, pNewExpr2, wtFlags);
testcase( idxNew2==0 );
@@ -127522,7 +129048,7 @@ static void exprAnalyze(
if( (prereqExpr & prereqColumn)==0 ){
Expr *pNewExpr;
pNewExpr = sqlite3PExpr(pParse, TK_MATCH,
- 0, sqlite3ExprDup(db, pRight, 0), 0);
+ 0, sqlite3ExprDup(db, pRight, 0));
idxNew = whereClauseInsert(pWC, pNewExpr, TERM_VIRTUAL|TERM_DYNAMIC);
testcase( idxNew==0 );
pNewTerm = &pWC->a[idxNew];
@@ -127548,20 +129074,19 @@ static void exprAnalyze(
** is not a sub-select. */
if( pWC->op==TK_AND
&& (pExpr->op==TK_EQ || pExpr->op==TK_IS)
- && sqlite3ExprIsVector(pExpr->pLeft)
+ && (nLeft = sqlite3ExprVectorSize(pExpr->pLeft))>1
+ && sqlite3ExprVectorSize(pExpr->pRight)==nLeft
&& ( (pExpr->pLeft->flags & EP_xIsSelect)==0
- || (pExpr->pRight->flags & EP_xIsSelect)==0
- )){
- int nLeft = sqlite3ExprVectorSize(pExpr->pLeft);
+ || (pExpr->pRight->flags & EP_xIsSelect)==0)
+ ){
int i;
- assert( nLeft==sqlite3ExprVectorSize(pExpr->pRight) );
for(i=0; i<nLeft; i++){
int idxNew;
Expr *pNew;
Expr *pLeft = sqlite3ExprForVectorField(pParse, pExpr->pLeft, i);
Expr *pRight = sqlite3ExprForVectorField(pParse, pExpr->pRight, i);
- pNew = sqlite3PExpr(pParse, pExpr->op, pLeft, pRight, 0);
+ pNew = sqlite3PExpr(pParse, pExpr->op, pLeft, pRight);
transferJoinMarkings(pNew, pExpr);
idxNew = whereClauseInsert(pWC, pNew, TERM_DYNAMIC);
exprAnalyze(pSrc, pWC, idxNew);
@@ -127613,7 +129138,7 @@ static void exprAnalyze(
pNewExpr = sqlite3PExpr(pParse, TK_GT,
sqlite3ExprDup(db, pLeft, 0),
- sqlite3ExprAlloc(db, TK_NULL, 0, 0), 0);
+ sqlite3ExprAlloc(db, TK_NULL, 0, 0));
idxNew = whereClauseInsert(pWC, pNewExpr,
TERM_VIRTUAL|TERM_DYNAMIC|TERM_VNULL);
@@ -127634,6 +129159,8 @@ static void exprAnalyze(
/* Prevent ON clause terms of a LEFT JOIN from being used to drive
** an index for tables to the left of the join.
*/
+ testcase( pTerm!=&pWC->a[idxTerm] );
+ pTerm = &pWC->a[idxTerm];
pTerm->prereqRight |= extraRight;
}
@@ -127797,7 +129324,7 @@ SQLITE_PRIVATE void sqlite3WhereTabFuncArgs(
pColRef->iColumn = k++;
pColRef->pTab = pTab;
pTerm = sqlite3PExpr(pParse, TK_EQ, pColRef,
- sqlite3ExprDup(pParse->db, pArgs->a[j].pExpr, 0), 0);
+ sqlite3ExprDup(pParse->db, pArgs->a[j].pExpr, 0));
whereClauseInsert(pWC, pTerm, TERM_DYNAMIC);
}
}
@@ -128004,11 +129531,13 @@ static WhereTerm *whereScanNext(WhereScan *pScan){
WhereTerm *pTerm; /* The term being tested */
int k = pScan->k; /* Where to start scanning */
- while( pScan->iEquiv<=pScan->nEquiv ){
- iCur = pScan->aiCur[pScan->iEquiv-1];
+ assert( pScan->iEquiv<=pScan->nEquiv );
+ pWC = pScan->pWC;
+ while(1){
iColumn = pScan->aiColumn[pScan->iEquiv-1];
- if( iColumn==XN_EXPR && pScan->pIdxExpr==0 ) return 0;
- while( (pWC = pScan->pWC)!=0 ){
+ iCur = pScan->aiCur[pScan->iEquiv-1];
+ assert( pWC!=0 );
+ do{
for(pTerm=pWC->a+k; k<pWC->nTerm; k++, pTerm++){
if( pTerm->leftCursor==iCur
&& pTerm->u.leftColumn==iColumn
@@ -128058,15 +129587,17 @@ static WhereTerm *whereScanNext(WhereScan *pScan){
testcase( pTerm->eOperator & WO_IS );
continue;
}
+ pScan->pWC = pWC;
pScan->k = k+1;
return pTerm;
}
}
}
- pScan->pWC = pScan->pWC->pOuter;
+ pWC = pWC->pOuter;
k = 0;
- }
- pScan->pWC = pScan->pOrigWC;
+ }while( pWC!=0 );
+ if( pScan->iEquiv>=pScan->nEquiv ) break;
+ pWC = pScan->pOrigWC;
k = 0;
pScan->iEquiv++;
}
@@ -128100,24 +129631,25 @@ static WhereTerm *whereScanInit(
u32 opMask, /* Operator(s) to scan for */
Index *pIdx /* Must be compatible with this index */
){
- int j = 0;
-
- /* memset(pScan, 0, sizeof(*pScan)); */
pScan->pOrigWC = pWC;
pScan->pWC = pWC;
pScan->pIdxExpr = 0;
+ pScan->idxaff = 0;
+ pScan->zCollName = 0;
if( pIdx ){
- j = iColumn;
+ int j = iColumn;
iColumn = pIdx->aiColumn[j];
- if( iColumn==XN_EXPR ) pScan->pIdxExpr = pIdx->aColExpr->a[j].pExpr;
- if( iColumn==pIdx->pTable->iPKey ) iColumn = XN_ROWID;
- }
- if( pIdx && iColumn>=0 ){
- pScan->idxaff = pIdx->pTable->aCol[iColumn].affinity;
- pScan->zCollName = pIdx->azColl[j];
- }else{
- pScan->idxaff = 0;
- pScan->zCollName = 0;
+ if( iColumn==XN_EXPR ){
+ pScan->pIdxExpr = pIdx->aColExpr->a[j].pExpr;
+ pScan->zCollName = pIdx->azColl[j];
+ }else if( iColumn==pIdx->pTable->iPKey ){
+ iColumn = XN_ROWID;
+ }else if( iColumn>=0 ){
+ pScan->idxaff = pIdx->pTable->aCol[iColumn].affinity;
+ pScan->zCollName = pIdx->azColl[j];
+ }
+ }else if( iColumn==XN_EXPR ){
+ return 0;
}
pScan->opMask = opMask;
pScan->k = 0;
@@ -132713,27 +134245,6 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
continue;
}
- /* Close all of the cursors that were opened by sqlite3WhereBegin.
- ** Except, do not close cursors that will be reused by the OR optimization
- ** (WHERE_OR_SUBCLAUSE). And do not close the OP_OpenWrite cursors
- ** created for the ONEPASS optimization.
- */
- if( (pTab->tabFlags & TF_Ephemeral)==0
- && pTab->pSelect==0
- && (pWInfo->wctrlFlags & WHERE_OR_SUBCLAUSE)==0
- ){
- int ws = pLoop->wsFlags;
- if( pWInfo->eOnePass==ONEPASS_OFF && (ws & WHERE_IDX_ONLY)==0 ){
- sqlite3VdbeAddOp1(v, OP_Close, pTabItem->iCursor);
- }
- if( (ws & WHERE_INDEXED)!=0
- && (ws & (WHERE_IPK|WHERE_AUTO_INDEX))==0
- && pLevel->iIdxCur!=pWInfo->aiCurOnePass[1]
- ){
- sqlite3VdbeAddOp1(v, OP_Close, pLevel->iIdxCur);
- }
- }
-
/* If this scan uses an index, make VDBE code substitutions to read data
** from the index instead of from the table where possible. In some cases
** this optimization prevents the table from ever being read, which can
@@ -132772,7 +134283,8 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
pOp->p2 = x;
pOp->p1 = pLevel->iIdxCur;
}
- assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 || x>=0 );
+ assert( (pLoop->wsFlags & WHERE_IDX_ONLY)==0 || x>=0
+ || pWInfo->eOnePass );
}else if( pOp->opcode==OP_Rowid ){
pOp->p1 = pLevel->iIdxCur;
pOp->opcode = OP_IdxRowid;
@@ -132837,6 +134349,19 @@ SQLITE_PRIVATE void sqlite3WhereEnd(WhereInfo *pWInfo){
#define YYPARSEFREENEVERNULL 1
/*
+** In the amalgamation, the parse.c file generated by lemon and the
+** tokenize.c file are concatenated. In that case, sqlite3RunParser()
+** has access to the size of the yyParser object and so the parser
+** engine can be allocated on the stack. In that case, only the
+** sqlite3ParserInit() and sqlite3ParserFinalize() routines are invoked
+** and the sqlite3ParserAlloc() and sqlite3ParserFree() routines can be
+** omitted.
+*/
+#ifdef SQLITE_AMALGAMATION
+# define sqlite3Parser_ENGINEALWAYSONSTACK 1
+#endif
+
+/*
** Alternative datatype for the argument to the malloc() routine passed
** into sqlite3ParserAlloc(). The default is size_t.
*/
@@ -132939,7 +134464,7 @@ static void disableLookaside(Parse *pParse){
ExprSpan *pLeft, /* The left operand, and output */
ExprSpan *pRight /* The right operand */
){
- pLeft->pExpr = sqlite3PExpr(pParse, op, pLeft->pExpr, pRight->pExpr, 0);
+ pLeft->pExpr = sqlite3PExpr(pParse, op, pLeft->pExpr, pRight->pExpr);
pLeft->zEnd = pRight->zEnd;
}
@@ -132948,7 +134473,7 @@ static void disableLookaside(Parse *pParse){
*/
static void exprNot(Parse *pParse, int doNot, ExprSpan *pSpan){
if( doNot ){
- pSpan->pExpr = sqlite3PExpr(pParse, TK_NOT, pSpan->pExpr, 0, 0);
+ pSpan->pExpr = sqlite3PExpr(pParse, TK_NOT, pSpan->pExpr, 0);
}
}
@@ -132960,7 +134485,7 @@ static void disableLookaside(Parse *pParse){
ExprSpan *pOperand, /* The operand, and output */
Token *pPostOp /* The operand token for setting the span */
){
- pOperand->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0, 0);
+ pOperand->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0);
pOperand->zEnd = &pPostOp->z[pPostOp->n];
}
@@ -132985,7 +134510,7 @@ static void disableLookaside(Parse *pParse){
Token *pPreOp /* The operand token for setting the span */
){
pOut->zStart = pPreOp->z;
- pOut->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0, 0);
+ pOut->pExpr = sqlite3PExpr(pParse, op, pOperand->pExpr, 0);
pOut->zEnd = pOperand->zEnd;
}
@@ -134284,6 +135809,31 @@ static int yyGrowStack(yyParser *p){
# define YYMALLOCARGTYPE size_t
#endif
+/* Initialize a new parser that has already been allocated.
+*/
+SQLITE_PRIVATE void sqlite3ParserInit(void *yypParser){
+ yyParser *pParser = (yyParser*)yypParser;
+#ifdef YYTRACKMAXSTACKDEPTH
+ pParser->yyhwm = 0;
+#endif
+#if YYSTACKDEPTH<=0
+ pParser->yytos = NULL;
+ pParser->yystack = NULL;
+ pParser->yystksz = 0;
+ if( yyGrowStack(pParser) ){
+ pParser->yystack = &pParser->yystk0;
+ pParser->yystksz = 1;
+ }
+#endif
+#ifndef YYNOERRORRECOVERY
+ pParser->yyerrcnt = -1;
+#endif
+ pParser->yytos = pParser->yystack;
+ pParser->yystack[0].stateno = 0;
+ pParser->yystack[0].major = 0;
+}
+
+#ifndef sqlite3Parser_ENGINEALWAYSONSTACK
/*
** This function allocates a new parser.
** The only argument is a pointer to a function which works like
@@ -134299,28 +135849,11 @@ static int yyGrowStack(yyParser *p){
SQLITE_PRIVATE void *sqlite3ParserAlloc(void *(*mallocProc)(YYMALLOCARGTYPE)){
yyParser *pParser;
pParser = (yyParser*)(*mallocProc)( (YYMALLOCARGTYPE)sizeof(yyParser) );
- if( pParser ){
-#ifdef YYTRACKMAXSTACKDEPTH
- pParser->yyhwm = 0;
-#endif
-#if YYSTACKDEPTH<=0
- pParser->yytos = NULL;
- pParser->yystack = NULL;
- pParser->yystksz = 0;
- if( yyGrowStack(pParser) ){
- pParser->yystack = &pParser->yystk0;
- pParser->yystksz = 1;
- }
-#endif
-#ifndef YYNOERRORRECOVERY
- pParser->yyerrcnt = -1;
-#endif
- pParser->yytos = pParser->yystack;
- pParser->yystack[0].stateno = 0;
- pParser->yystack[0].major = 0;
- }
+ if( pParser ) sqlite3ParserInit(pParser);
return pParser;
}
+#endif /* sqlite3Parser_ENGINEALWAYSONSTACK */
+
/* The following function deletes the "minor type" or semantic value
** associated with a symbol. The symbol can be either a terminal
@@ -134446,6 +135979,18 @@ static void yy_pop_parser_stack(yyParser *pParser){
yy_destructor(pParser, yytos->major, &yytos->minor);
}
+/*
+** Clear all secondary memory allocations from the parser
+*/
+SQLITE_PRIVATE void sqlite3ParserFinalize(void *p){
+ yyParser *pParser = (yyParser*)p;
+ while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser);
+#if YYSTACKDEPTH<=0
+ if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack);
+#endif
+}
+
+#ifndef sqlite3Parser_ENGINEALWAYSONSTACK
/*
** Deallocate and destroy a parser. Destructors are called for
** all stack elements before shutting the parser down.
@@ -134458,16 +136003,13 @@ SQLITE_PRIVATE void sqlite3ParserFree(
void *p, /* The parser to be deleted */
void (*freeProc)(void*) /* Function used to reclaim memory */
){
- yyParser *pParser = (yyParser*)p;
#ifndef YYPARSEFREENEVERNULL
- if( pParser==0 ) return;
-#endif
- while( pParser->yytos>pParser->yystack ) yy_pop_parser_stack(pParser);
-#if YYSTACKDEPTH<=0
- if( pParser->yystack!=&pParser->yystk0 ) free(pParser->yystack);
+ if( p==0 ) return;
#endif
- (*freeProc)((void*)pParser);
+ sqlite3ParserFinalize(p);
+ (*freeProc)(p);
}
+#endif /* sqlite3Parser_ENGINEALWAYSONSTACK */
/*
** Return the peak depth of the stack for a parser.
@@ -134578,7 +136120,6 @@ static int yy_find_reduce_action(
*/
static void yyStackOverflow(yyParser *yypParser){
sqlite3ParserARG_FETCH;
- yypParser->yytos--;
#ifndef NDEBUG
if( yyTraceFILE ){
fprintf(yyTraceFILE,"%sStack Overflow!\n",yyTracePrompt);
@@ -134633,12 +136174,14 @@ static void yy_shift(
#endif
#if YYSTACKDEPTH>0
if( yypParser->yytos>=&yypParser->yystack[YYSTACKDEPTH] ){
+ yypParser->yytos--;
yyStackOverflow(yypParser);
return;
}
#else
if( yypParser->yytos>=&yypParser->yystack[yypParser->yystksz] ){
if( yyGrowStack(yypParser) ){
+ yypParser->yytos--;
yyStackOverflow(yypParser);
return;
}
@@ -135180,7 +136723,7 @@ static void yy_reduce(
case 33: /* ccons ::= DEFAULT MINUS term */
{
ExprSpan v;
- v.pExpr = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy190.pExpr, 0, 0);
+ v.pExpr = sqlite3PExpr(pParse, TK_UMINUS, yymsp[0].minor.yy190.pExpr, 0);
v.zStart = yymsp[-1].minor.yy0.z;
v.zEnd = yymsp[0].minor.yy190.zEnd;
sqlite3AddDefaultValue(pParse,&v);
@@ -135444,9 +136987,9 @@ static void yy_reduce(
break;
case 94: /* selcollist ::= sclp nm DOT STAR */
{
- Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0, 0);
- Expr *pLeft = sqlite3PExpr(pParse, TK_ID, 0, 0, &yymsp[-2].minor.yy0);
- Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight, 0);
+ Expr *pRight = sqlite3PExpr(pParse, TK_ASTERISK, 0, 0);
+ Expr *pLeft = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1);
+ Expr *pDot = sqlite3PExpr(pParse, TK_DOT, pLeft, pRight);
yymsp[-3].minor.yy148 = sqlite3ExprListAppend(pParse,yymsp[-3].minor.yy148, pDot);
}
break;
@@ -135672,7 +137215,7 @@ static void yy_reduce(
Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1);
Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1);
spanSet(&yymsp[-2].minor.yy190,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/
- yymsp[-2].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp2, 0);
+ yymsp[-2].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp2);
}
break;
case 155: /* expr ::= nm DOT nm DOT nm */
@@ -135680,9 +137223,9 @@ static void yy_reduce(
Expr *temp1 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-4].minor.yy0, 1);
Expr *temp2 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[-2].minor.yy0, 1);
Expr *temp3 = sqlite3ExprAlloc(pParse->db, TK_ID, &yymsp[0].minor.yy0, 1);
- Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3, 0);
+ Expr *temp4 = sqlite3PExpr(pParse, TK_DOT, temp2, temp3);
spanSet(&yymsp[-4].minor.yy190,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/
- yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp4, 0);
+ yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_DOT, temp1, temp4);
}
break;
case 158: /* term ::= INTEGER */
@@ -135711,7 +137254,7 @@ static void yy_reduce(
sqlite3ErrorMsg(pParse, "near \"%T\": syntax error", &t);
yymsp[0].minor.yy190.pExpr = 0;
}else{
- yymsp[0].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_REGISTER, 0, 0, 0);
+ yymsp[0].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_REGISTER, 0, 0);
if( yymsp[0].minor.yy190.pExpr ) sqlite3GetInt32(&t.z[1], &yymsp[0].minor.yy190.pExpr->iTable);
}
}
@@ -135726,7 +137269,8 @@ static void yy_reduce(
case 161: /* expr ::= CAST LP expr AS typetoken RP */
{
spanSet(&yymsp[-5].minor.yy190,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/
- yymsp[-5].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_CAST, yymsp[-3].minor.yy190.pExpr, 0, &yymsp[-1].minor.yy0);
+ yymsp[-5].minor.yy190.pExpr = sqlite3ExprAlloc(pParse->db, TK_CAST, &yymsp[-1].minor.yy0, 1);
+ sqlite3ExprAttachSubtrees(pParse->db, yymsp[-5].minor.yy190.pExpr, yymsp[-3].minor.yy190.pExpr, 0);
}
break;
case 162: /* expr ::= ID|INDEXED LP distinct exprlist RP */
@@ -135759,7 +137303,7 @@ static void yy_reduce(
case 165: /* expr ::= LP nexprlist COMMA expr RP */
{
ExprList *pList = sqlite3ExprListAppend(pParse, yymsp[-3].minor.yy148, yymsp[-1].minor.yy190.pExpr);
- yylhsminor.yy190.pExpr = sqlite3PExpr(pParse, TK_VECTOR, 0, 0, 0);
+ yylhsminor.yy190.pExpr = sqlite3PExpr(pParse, TK_VECTOR, 0, 0);
if( yylhsminor.yy190.pExpr ){
yylhsminor.yy190.pExpr->x.pList = pList;
spanSet(&yylhsminor.yy190, &yymsp[-4].minor.yy0, &yymsp[0].minor.yy0);
@@ -135848,7 +137392,7 @@ static void yy_reduce(
{
ExprList *pList = sqlite3ExprListAppend(pParse,0, yymsp[-2].minor.yy190.pExpr);
pList = sqlite3ExprListAppend(pParse,pList, yymsp[0].minor.yy190.pExpr);
- yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy190.pExpr, 0, 0);
+ yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_BETWEEN, yymsp[-4].minor.yy190.pExpr, 0);
if( yymsp[-4].minor.yy190.pExpr ){
yymsp[-4].minor.yy190.pExpr->x.pList = pList;
}else{
@@ -135870,7 +137414,7 @@ static void yy_reduce(
** regardless of the value of expr1.
*/
sqlite3ExprDelete(pParse->db, yymsp[-4].minor.yy190.pExpr);
- yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_INTEGER, 0, 0, &sqlite3IntTokens[yymsp[-3].minor.yy194]);
+ yymsp[-4].minor.yy190.pExpr = sqlite3ExprAlloc(pParse->db, TK_INTEGER,&sqlite3IntTokens[yymsp[-3].minor.yy194],1);
}else if( yymsp[-1].minor.yy148->nExpr==1 ){
/* Expressions of the form:
**
@@ -135897,9 +137441,9 @@ static void yy_reduce(
pRHS->flags &= ~EP_Collate;
pRHS->flags |= EP_Generic;
}
- yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, yymsp[-3].minor.yy194 ? TK_NE : TK_EQ, yymsp[-4].minor.yy190.pExpr, pRHS, 0);
+ yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, yymsp[-3].minor.yy194 ? TK_NE : TK_EQ, yymsp[-4].minor.yy190.pExpr, pRHS);
}else{
- yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0, 0);
+ yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0);
if( yymsp[-4].minor.yy190.pExpr ){
yymsp[-4].minor.yy190.pExpr->x.pList = yymsp[-1].minor.yy148;
sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy190.pExpr);
@@ -135914,13 +137458,13 @@ static void yy_reduce(
case 192: /* expr ::= LP select RP */
{
spanSet(&yymsp[-2].minor.yy190,&yymsp[-2].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/
- yymsp[-2].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_SELECT, 0, 0, 0);
+ yymsp[-2].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_SELECT, 0, 0);
sqlite3PExprAddSelect(pParse, yymsp[-2].minor.yy190.pExpr, yymsp[-1].minor.yy243);
}
break;
case 193: /* expr ::= expr in_op LP select RP */
{
- yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0, 0);
+ yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0);
sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy190.pExpr, yymsp[-1].minor.yy243);
exprNot(pParse, yymsp[-3].minor.yy194, &yymsp[-4].minor.yy190);
yymsp[-4].minor.yy190.zEnd = &yymsp[0].minor.yy0.z[yymsp[0].minor.yy0.n];
@@ -135931,7 +137475,7 @@ static void yy_reduce(
SrcList *pSrc = sqlite3SrcListAppend(pParse->db, 0,&yymsp[-2].minor.yy0,&yymsp[-1].minor.yy0);
Select *pSelect = sqlite3SelectNew(pParse, 0,pSrc,0,0,0,0,0,0,0);
if( yymsp[0].minor.yy148 ) sqlite3SrcListFuncArgs(pParse, pSelect ? pSrc : 0, yymsp[0].minor.yy148);
- yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0, 0);
+ yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_IN, yymsp[-4].minor.yy190.pExpr, 0);
sqlite3PExprAddSelect(pParse, yymsp[-4].minor.yy190.pExpr, pSelect);
exprNot(pParse, yymsp[-3].minor.yy194, &yymsp[-4].minor.yy190);
yymsp[-4].minor.yy190.zEnd = yymsp[-1].minor.yy0.z ? &yymsp[-1].minor.yy0.z[yymsp[-1].minor.yy0.n] : &yymsp[-2].minor.yy0.z[yymsp[-2].minor.yy0.n];
@@ -135941,14 +137485,14 @@ static void yy_reduce(
{
Expr *p;
spanSet(&yymsp[-3].minor.yy190,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-B*/
- p = yymsp[-3].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_EXISTS, 0, 0, 0);
+ p = yymsp[-3].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_EXISTS, 0, 0);
sqlite3PExprAddSelect(pParse, p, yymsp[-1].minor.yy243);
}
break;
case 196: /* expr ::= CASE case_operand case_exprlist case_else END */
{
spanSet(&yymsp[-4].minor.yy190,&yymsp[-4].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-C*/
- yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy72, 0, 0);
+ yymsp[-4].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_CASE, yymsp[-3].minor.yy72, 0);
if( yymsp[-4].minor.yy190.pExpr ){
yymsp[-4].minor.yy190.pExpr->x.pList = yymsp[-1].minor.yy72 ? sqlite3ExprListAppend(pParse,yymsp[-2].minor.yy148,yymsp[-1].minor.yy72) : yymsp[-2].minor.yy148;
sqlite3ExprSetHeightAndFlags(pParse, yymsp[-4].minor.yy190.pExpr);
@@ -136122,7 +137666,7 @@ static void yy_reduce(
case 247: /* expr ::= RAISE LP IGNORE RP */
{
spanSet(&yymsp[-3].minor.yy190,&yymsp[-3].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/
- yymsp[-3].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, 0);
+ yymsp[-3].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0);
if( yymsp[-3].minor.yy190.pExpr ){
yymsp[-3].minor.yy190.pExpr->affinity = OE_Ignore;
}
@@ -136131,7 +137675,7 @@ static void yy_reduce(
case 248: /* expr ::= RAISE LP raisetype COMMA nm RP */
{
spanSet(&yymsp[-5].minor.yy190,&yymsp[-5].minor.yy0,&yymsp[0].minor.yy0); /*A-overwrites-X*/
- yymsp[-5].minor.yy190.pExpr = sqlite3PExpr(pParse, TK_RAISE, 0, 0, &yymsp[-1].minor.yy0);
+ yymsp[-5].minor.yy190.pExpr = sqlite3ExprAlloc(pParse->db, TK_RAISE, &yymsp[-1].minor.yy0, 1);
if( yymsp[-5].minor.yy190.pExpr ) {
yymsp[-5].minor.yy190.pExpr->affinity = (char)yymsp[-3].minor.yy194;
}
@@ -136630,13 +138174,13 @@ static const unsigned char aiClass[] = {
/* 1x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
/* 2x */ 27, 27, 27, 27, 27, 7, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
/* 3x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
-/* 4x */ 7, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 12, 17, 20, 10,
+/* 4x */ 7, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 26, 12, 17, 20, 10,
/* 5x */ 24, 27, 27, 27, 27, 27, 27, 27, 27, 27, 15, 4, 21, 18, 19, 27,
-/* 6x */ 11, 16, 27, 27, 27, 27, 27, 27, 27, 27, 27, 23, 22, 1, 13, 7,
+/* 6x */ 11, 16, 27, 27, 27, 27, 27, 27, 27, 27, 27, 23, 22, 1, 13, 6,
/* 7x */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 8, 5, 5, 5, 8, 14, 8,
/* 8x */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27,
/* 9x */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27,
-/* 9x */ 25, 1, 1, 1, 1, 1, 1, 0, 1, 1, 27, 27, 27, 27, 27, 27,
+/* Ax */ 27, 25, 1, 1, 1, 1, 1, 0, 1, 1, 27, 27, 27, 27, 27, 27,
/* Bx */ 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 9, 27, 27, 27, 27, 27,
/* Cx */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27,
/* Dx */ 27, 1, 1, 1, 1, 1, 1, 1, 1, 1, 27, 27, 27, 27, 27, 27,
@@ -137319,6 +138863,9 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr
int lastTokenParsed = -1; /* type of the previous token */
sqlite3 *db = pParse->db; /* The database connection */
int mxSqlLen; /* Max length of an SQL string */
+#ifdef sqlite3Parser_ENGINEALWAYSONSTACK
+ unsigned char zSpace[sizeof(yyParser)]; /* Space for parser engine object */
+#endif
assert( zSql!=0 );
mxSqlLen = db->aLimit[SQLITE_LIMIT_SQL_LENGTH];
@@ -137330,16 +138877,20 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr
i = 0;
assert( pzErrMsg!=0 );
/* sqlite3ParserTrace(stdout, "parser: "); */
+#ifdef sqlite3Parser_ENGINEALWAYSONSTACK
+ pEngine = zSpace;
+ sqlite3ParserInit(pEngine);
+#else
pEngine = sqlite3ParserAlloc(sqlite3Malloc);
if( pEngine==0 ){
sqlite3OomFault(db);
return SQLITE_NOMEM_BKPT;
}
+#endif
assert( pParse->pNewTable==0 );
assert( pParse->pNewTrigger==0 );
assert( pParse->nVar==0 );
- assert( pParse->nzVar==0 );
- assert( pParse->azVar==0 );
+ assert( pParse->pVList==0 );
while( 1 ){
assert( i>=0 );
if( zSql[i]!=0 ){
@@ -137387,7 +138938,11 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr
);
sqlite3_mutex_leave(sqlite3MallocMutex());
#endif /* YYDEBUG */
+#ifdef sqlite3Parser_ENGINEALWAYSONSTACK
+ sqlite3ParserFinalize(pEngine);
+#else
sqlite3ParserFree(pEngine, sqlite3_free);
+#endif
if( db->mallocFailed ){
pParse->rc = SQLITE_NOMEM_BKPT;
}
@@ -137426,8 +138981,7 @@ SQLITE_PRIVATE int sqlite3RunParser(Parse *pParse, const char *zSql, char **pzEr
if( pParse->pWithToFree ) sqlite3WithDelete(db, pParse->pWithToFree);
sqlite3DeleteTrigger(db, pParse->pNewTrigger);
- for(i=pParse->nzVar-1; i>=0; i--) sqlite3DbFree(db, pParse->azVar[i]);
- sqlite3DbFree(db, pParse->azVar);
+ sqlite3DbFree(db, pParse->pVList);
while( pParse->pAinc ){
AutoincInfo *p = pParse->pAinc;
pParse->pAinc = p->pNext;
@@ -138640,6 +140194,7 @@ SQLITE_API int sqlite3_db_config(sqlite3 *db, int op, ...){
{ SQLITE_DBCONFIG_ENABLE_TRIGGER, SQLITE_EnableTrigger },
{ SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER, SQLITE_Fts3Tokenizer },
{ SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION, SQLITE_LoadExtension },
+ { SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE, SQLITE_NoCkptOnClose },
};
unsigned int i;
rc = SQLITE_ERROR; /* IMP: R-42790-23372 */
@@ -139397,7 +140952,7 @@ SQLITE_API int sqlite3_busy_timeout(sqlite3 *db, int ms){
*/
SQLITE_API void sqlite3_interrupt(sqlite3 *db){
#ifdef SQLITE_ENABLE_API_ARMOR
- if( !sqlite3SafetyCheckOk(db) ){
+ if( !sqlite3SafetyCheckOk(db) && (db==0 || db->magic!=SQLITE_MAGIC_ZOMBIE) ){
(void)SQLITE_MISUSE_BKPT;
return;
}
@@ -139936,6 +141491,13 @@ SQLITE_API int sqlite3_wal_checkpoint_v2(
sqlite3Error(db, rc);
}
rc = sqlite3ApiExit(db, rc);
+
+ /* If there are no active statements, clear the interrupt flag at this
+ ** point. */
+ if( db->nVdbeActive==0 ){
+ db->u1.isInterrupted = 0;
+ }
+
sqlite3_mutex_leave(db->mutex);
return rc;
#endif
@@ -140438,6 +142000,7 @@ SQLITE_PRIVATE int sqlite3ParseUri(
assert( octet>=0 && octet<256 );
if( octet==0 ){
+#ifndef SQLITE_ENABLE_URI_00_ERROR
/* This branch is taken when "%00" appears within the URI. In this
** case we ignore all text in the remainder of the path, name or
** value currently being parsed. So ignore the current character
@@ -140450,6 +142013,12 @@ SQLITE_PRIVATE int sqlite3ParseUri(
iIn++;
}
continue;
+#else
+ /* If SQLITE_ENABLE_URI_00_ERROR is defined, "%00" in a URI is an error. */
+ *pzErrMsg = sqlite3_mprintf("unexpected %%00 in uri");
+ rc = SQLITE_ERROR;
+ goto parse_uri_out;
+#endif
}
c = octet;
}else if( eState==1 && (c=='&' || c=='=') ){
@@ -140554,7 +142123,9 @@ SQLITE_PRIVATE int sqlite3ParseUri(
}else{
zFile = sqlite3_malloc64(nUri+2);
if( !zFile ) return SQLITE_NOMEM_BKPT;
- memcpy(zFile, zUri, nUri);
+ if( nUri ){
+ memcpy(zFile, zUri, nUri);
+ }
zFile[nUri] = '\0';
zFile[nUri+1] = '\0';
flags &= ~SQLITE_OPEN_URI;
@@ -141348,7 +142919,7 @@ SQLITE_API int sqlite3_file_control(sqlite3 *db, const char *zDbName, int op, vo
*/
SQLITE_API int sqlite3_test_control(int op, ...){
int rc = 0;
-#ifdef SQLITE_OMIT_BUILTIN_TEST
+#ifdef SQLITE_UNTESTABLE
UNUSED_PARAMETER(op);
#else
va_list ap;
@@ -141685,7 +143256,7 @@ SQLITE_API int sqlite3_test_control(int op, ...){
}
}
va_end(ap);
-#endif /* SQLITE_OMIT_BUILTIN_TEST */
+#endif /* SQLITE_UNTESTABLE */
return rc;
}
@@ -141741,15 +143312,8 @@ SQLITE_API sqlite3_int64 sqlite3_uri_int64(
** Return the Btree pointer identified by zDbName. Return NULL if not found.
*/
SQLITE_PRIVATE Btree *sqlite3DbNameToBtree(sqlite3 *db, const char *zDbName){
- int i;
- for(i=0; i<db->nDb; i++){
- if( db->aDb[i].pBt
- && (zDbName==0 || sqlite3StrICmp(zDbName, db->aDb[i].zDbSName)==0)
- ){
- return db->aDb[i].pBt;
- }
- }
- return 0;
+ int iDb = zDbName ? sqlite3FindDbName(db, zDbName) : 0;
+ return iDb<0 ? 0 : db->aDb[iDb].pBt;
}
/*
@@ -141796,7 +143360,6 @@ SQLITE_API int sqlite3_snapshot_get(
){
int rc = SQLITE_ERROR;
#ifndef SQLITE_OMIT_WAL
- int iDb;
#ifdef SQLITE_ENABLE_API_ARMOR
if( !sqlite3SafetyCheckOk(db) ){
@@ -141805,13 +143368,15 @@ SQLITE_API int sqlite3_snapshot_get(
#endif
sqlite3_mutex_enter(db->mutex);
- iDb = sqlite3FindDbName(db, zDb);
- if( iDb==0 || iDb>1 ){
- Btree *pBt = db->aDb[iDb].pBt;
- if( 0==sqlite3BtreeIsInTrans(pBt) ){
- rc = sqlite3BtreeBeginTrans(pBt, 0);
- if( rc==SQLITE_OK ){
- rc = sqlite3PagerSnapshotGet(sqlite3BtreePager(pBt), ppSnapshot);
+ if( db->autoCommit==0 ){
+ int iDb = sqlite3FindDbName(db, zDb);
+ if( iDb==0 || iDb>1 ){
+ Btree *pBt = db->aDb[iDb].pBt;
+ if( 0==sqlite3BtreeIsInTrans(pBt) ){
+ rc = sqlite3BtreeBeginTrans(pBt, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerSnapshotGet(sqlite3BtreePager(pBt), ppSnapshot);
+ }
}
}
}
@@ -141859,6 +143424,38 @@ SQLITE_API int sqlite3_snapshot_open(
}
/*
+** Recover as many snapshots as possible from the wal file associated with
+** schema zDb of database db.
+*/
+SQLITE_API int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb){
+ int rc = SQLITE_ERROR;
+ int iDb;
+#ifndef SQLITE_OMIT_WAL
+
+#ifdef SQLITE_ENABLE_API_ARMOR
+ if( !sqlite3SafetyCheckOk(db) ){
+ return SQLITE_MISUSE_BKPT;
+ }
+#endif
+
+ sqlite3_mutex_enter(db->mutex);
+ iDb = sqlite3FindDbName(db, zDb);
+ if( iDb==0 || iDb>1 ){
+ Btree *pBt = db->aDb[iDb].pBt;
+ if( 0==sqlite3BtreeIsInReadTrans(pBt) ){
+ rc = sqlite3BtreeBeginTrans(pBt, 0);
+ if( rc==SQLITE_OK ){
+ rc = sqlite3PagerSnapshotRecover(sqlite3BtreePager(pBt));
+ sqlite3BtreeCommit(pBt);
+ }
+ }
+ }
+ sqlite3_mutex_leave(db->mutex);
+#endif /* SQLITE_OMIT_WAL */
+ return rc;
+}
+
+/*
** Free a snapshot handle obtained from sqlite3_snapshot_get().
*/
SQLITE_API void sqlite3_snapshot_free(sqlite3_snapshot *pSnapshot){
@@ -143008,6 +144605,7 @@ struct Fts3Table {
** statements is run and reset within a single virtual table API call.
*/
sqlite3_stmt *aStmt[40];
+ sqlite3_stmt *pSeekStmt; /* Cache for fts3CursorSeekStmt() */
char *zReadExprlist;
char *zWriteExprlist;
@@ -143077,6 +144675,7 @@ struct Fts3Cursor {
i16 eSearch; /* Search strategy (see below) */
u8 isEof; /* True if at End Of Results */
u8 isRequireSeek; /* True if must seek pStmt to %_content row */
+ u8 bSeekStmt; /* True if pStmt is a seek */
sqlite3_stmt *pStmt; /* Prepared statement in use by the cursor */
Fts3Expr *pExpr; /* Parsed MATCH query string */
int iLangid; /* Language being queried for */
@@ -143599,6 +145198,7 @@ static int fts3DisconnectMethod(sqlite3_vtab *pVtab){
assert( p->pSegments==0 );
/* Free any prepared statements held */
+ sqlite3_finalize(p->pSeekStmt);
for(i=0; i<SizeofArray(p->aStmt); i++){
sqlite3_finalize(p->aStmt[i]);
}
@@ -144470,9 +146070,9 @@ static int fts3InitVtab(
p->pTokenizer = pTokenizer;
p->nMaxPendingData = FTS3_MAX_PENDING_DATA;
p->bHasDocsize = (isFts4 && bNoDocsize==0);
- p->bHasStat = isFts4;
- p->bFts4 = isFts4;
- p->bDescIdx = bDescIdx;
+ p->bHasStat = (u8)isFts4;
+ p->bFts4 = (u8)isFts4;
+ p->bDescIdx = (u8)bDescIdx;
p->nAutoincrmerge = 0xff; /* 0xff means setting unknown */
p->zContentTbl = zContent;
p->zLanguageid = zLanguageid;
@@ -144788,13 +146388,33 @@ static int fts3OpenMethod(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCsr){
}
/*
+** Finalize the statement handle at pCsr->pStmt.
+**
+** Or, if that statement handle is one created by fts3CursorSeekStmt(),
+** and the Fts3Table.pSeekStmt slot is currently NULL, save the statement
+** pointer there instead of finalizing it.
+*/
+static void fts3CursorFinalizeStmt(Fts3Cursor *pCsr){
+ if( pCsr->bSeekStmt ){
+ Fts3Table *p = (Fts3Table *)pCsr->base.pVtab;
+ if( p->pSeekStmt==0 ){
+ p->pSeekStmt = pCsr->pStmt;
+ sqlite3_reset(pCsr->pStmt);
+ pCsr->pStmt = 0;
+ }
+ pCsr->bSeekStmt = 0;
+ }
+ sqlite3_finalize(pCsr->pStmt);
+}
+
+/*
** Close the cursor. For additional information see the documentation
** on the xClose method of the virtual table interface.
*/
static int fts3CloseMethod(sqlite3_vtab_cursor *pCursor){
Fts3Cursor *pCsr = (Fts3Cursor *)pCursor;
assert( ((Fts3Table *)pCsr->base.pVtab)->pSegments==0 );
- sqlite3_finalize(pCsr->pStmt);
+ fts3CursorFinalizeStmt(pCsr);
sqlite3Fts3ExprFree(pCsr->pExpr);
sqlite3Fts3FreeDeferredTokens(pCsr);
sqlite3_free(pCsr->aDoclist);
@@ -144812,20 +146432,23 @@ static int fts3CloseMethod(sqlite3_vtab_cursor *pCursor){
**
** (or the equivalent for a content=xxx table) and set pCsr->pStmt to
** it. If an error occurs, return an SQLite error code.
-**
-** Otherwise, set *ppStmt to point to pCsr->pStmt and return SQLITE_OK.
*/
-static int fts3CursorSeekStmt(Fts3Cursor *pCsr, sqlite3_stmt **ppStmt){
+static int fts3CursorSeekStmt(Fts3Cursor *pCsr){
int rc = SQLITE_OK;
if( pCsr->pStmt==0 ){
Fts3Table *p = (Fts3Table *)pCsr->base.pVtab;
char *zSql;
- zSql = sqlite3_mprintf("SELECT %s WHERE rowid = ?", p->zReadExprlist);
- if( !zSql ) return SQLITE_NOMEM;
- rc = sqlite3_prepare_v2(p->db, zSql, -1, &pCsr->pStmt, 0);
- sqlite3_free(zSql);
+ if( p->pSeekStmt ){
+ pCsr->pStmt = p->pSeekStmt;
+ p->pSeekStmt = 0;
+ }else{
+ zSql = sqlite3_mprintf("SELECT %s WHERE rowid = ?", p->zReadExprlist);
+ if( !zSql ) return SQLITE_NOMEM;
+ rc = sqlite3_prepare_v2(p->db, zSql, -1, &pCsr->pStmt, 0);
+ sqlite3_free(zSql);
+ }
+ if( rc==SQLITE_OK ) pCsr->bSeekStmt = 1;
}
- *ppStmt = pCsr->pStmt;
return rc;
}
@@ -144837,9 +146460,7 @@ static int fts3CursorSeekStmt(Fts3Cursor *pCsr, sqlite3_stmt **ppStmt){
static int fts3CursorSeek(sqlite3_context *pContext, Fts3Cursor *pCsr){
int rc = SQLITE_OK;
if( pCsr->isRequireSeek ){
- sqlite3_stmt *pStmt = 0;
-
- rc = fts3CursorSeekStmt(pCsr, &pStmt);
+ rc = fts3CursorSeekStmt(pCsr);
if( rc==SQLITE_OK ){
sqlite3_bind_int64(pCsr->pStmt, 1, pCsr->iPrevId);
pCsr->isRequireSeek = 0;
@@ -146297,7 +147918,7 @@ static int fts3FilterMethod(
assert( iIdx==nVal );
/* In case the cursor has been used before, clear it now. */
- sqlite3_finalize(pCsr->pStmt);
+ fts3CursorFinalizeStmt(pCsr);
sqlite3_free(pCsr->aDoclist);
sqlite3Fts3MIBufferFree(pCsr->pMIBuffer);
sqlite3Fts3ExprFree(pCsr->pExpr);
@@ -146365,7 +147986,7 @@ static int fts3FilterMethod(
rc = SQLITE_NOMEM;
}
}else if( eSearch==FTS3_DOCID_SEARCH ){
- rc = fts3CursorSeekStmt(pCsr, &pCsr->pStmt);
+ rc = fts3CursorSeekStmt(pCsr);
if( rc==SQLITE_OK ){
rc = sqlite3_bind_value(pCsr->pStmt, 1, pCons);
}
@@ -146529,7 +148150,7 @@ static int fts3SetHasStat(Fts3Table *p){
if( rc==SQLITE_OK ){
int bHasStat = (sqlite3_step(pStmt)==SQLITE_ROW);
rc = sqlite3_finalize(pStmt);
- if( rc==SQLITE_OK ) p->bHasStat = bHasStat;
+ if( rc==SQLITE_OK ) p->bHasStat = (u8)bHasStat;
}
sqlite3_free(zSql);
}else{
@@ -161385,6 +163006,7 @@ SQLITE_PRIVATE int sqlite3FtsUnicodeFold(int c, int bRemoveDiacritic){
#ifndef SQLITE_AMALGAMATION
#include "sqlite3rtree.h"
typedef sqlite3_int64 i64;
+typedef sqlite3_uint64 u64;
typedef unsigned char u8;
typedef unsigned short u16;
typedef unsigned int u32;
@@ -161433,13 +163055,16 @@ struct Rtree {
sqlite3 *db; /* Host database connection */
int iNodeSize; /* Size in bytes of each node in the node table */
u8 nDim; /* Number of dimensions */
+ u8 nDim2; /* Twice the number of dimensions */
u8 eCoordType; /* RTREE_COORD_REAL32 or RTREE_COORD_INT32 */
u8 nBytesPerCell; /* Bytes consumed per cell */
+ u8 inWrTrans; /* True if inside write transaction */
int iDepth; /* Current depth of the r-tree structure */
char *zDb; /* Name of database containing r-tree table */
char *zName; /* Name of r-tree table */
- int nBusy; /* Current number of users of this structure */
+ u32 nBusy; /* Current number of users of this structure */
i64 nRowEst; /* Estimated number of rows in this table */
+ u32 nCursor; /* Number of open cursors */
/* List of nodes removed during a CondenseTree operation. List is
** linked together via the pointer normally used for hash chains -
@@ -161449,8 +163074,10 @@ struct Rtree {
RtreeNode *pDeleted;
int iReinsertHeight; /* Height of sub-trees Reinsert() has run on */
+ /* Blob I/O on xxx_node */
+ sqlite3_blob *pNodeBlob;
+
/* Statements to read/write/delete a record from xxx_node */
- sqlite3_stmt *pReadNode;
sqlite3_stmt *pWriteNode;
sqlite3_stmt *pDeleteNode;
@@ -161679,6 +163306,64 @@ struct RtreeMatchArg {
# define MIN(x,y) ((x) > (y) ? (y) : (x))
#endif
+/* What version of GCC is being used. 0 means GCC is not being used */
+#ifndef GCC_VERSION
+#if defined(__GNUC__) && !defined(SQLITE_DISABLE_INTRINSIC)
+# define GCC_VERSION (__GNUC__*1000000+__GNUC_MINOR__*1000+__GNUC_PATCHLEVEL__)
+#else
+# define GCC_VERSION 0
+#endif
+#endif
+
+/* What version of CLANG is being used. 0 means CLANG is not being used */
+#ifndef CLANG_VERSION
+#if defined(__clang__) && !defined(_WIN32) && !defined(SQLITE_DISABLE_INTRINSIC)
+# define CLANG_VERSION \
+ (__clang_major__*1000000+__clang_minor__*1000+__clang_patchlevel__)
+#else
+# define CLANG_VERSION 0
+#endif
+#endif
+
+/* The testcase() macro should already be defined in the amalgamation. If
+** it is not, make it a no-op.
+*/
+#ifndef SQLITE_AMALGAMATION
+# define testcase(X)
+#endif
+
+/*
+** Macros to determine whether the machine is big or little endian,
+** and whether or not that determination is run-time or compile-time.
+**
+** For best performance, an attempt is made to guess at the byte-order
+** using C-preprocessor macros. If that is unsuccessful, or if
+** -DSQLITE_RUNTIME_BYTEORDER=1 is set, then byte-order is determined
+** at run-time.
+*/
+#ifndef SQLITE_BYTEORDER
+#if defined(i386) || defined(__i386__) || defined(_M_IX86) || \
+ defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) || \
+ defined(_M_AMD64) || defined(_M_ARM) || defined(__x86) || \
+ defined(__arm__)
+# define SQLITE_BYTEORDER 1234
+#elif defined(sparc) || defined(__ppc__)
+# define SQLITE_BYTEORDER 4321
+#else
+# define SQLITE_BYTEORDER 0 /* 0 means "unknown at compile-time" */
+#endif
+#endif
+
+
+/* What version of MSVC is being used. 0 means MSVC is not being used */
+#ifndef MSVC_VERSION
+#if defined(_MSC_VER) && !defined(SQLITE_DISABLE_INTRINSIC)
+# define MSVC_VERSION _MSC_VER
+#else
+# define MSVC_VERSION 0
+#endif
+#endif
+
/*
** Functions to deserialize a 16 bit integer, 32 bit real number and
** 64 bit integer. The deserialized value is returned.
@@ -161687,14 +163372,36 @@ static int readInt16(u8 *p){
return (p[0]<<8) + p[1];
}
static void readCoord(u8 *p, RtreeCoord *pCoord){
+ assert( ((((char*)p) - (char*)0)&3)==0 ); /* p is always 4-byte aligned */
+#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+ pCoord->u = _byteswap_ulong(*(u32*)p);
+#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000)
+ pCoord->u = __builtin_bswap32(*(u32*)p);
+#elif SQLITE_BYTEORDER==4321
+ pCoord->u = *(u32*)p;
+#else
pCoord->u = (
(((u32)p[0]) << 24) +
(((u32)p[1]) << 16) +
(((u32)p[2]) << 8) +
(((u32)p[3]) << 0)
);
+#endif
}
static i64 readInt64(u8 *p){
+#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+ u64 x;
+ memcpy(&x, p, 8);
+ return (i64)_byteswap_uint64(x);
+#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000)
+ u64 x;
+ memcpy(&x, p, 8);
+ return (i64)__builtin_bswap64(x);
+#elif SQLITE_BYTEORDER==4321
+ i64 x;
+ memcpy(&x, p, 8);
+ return x;
+#else
return (
(((i64)p[0]) << 56) +
(((i64)p[1]) << 48) +
@@ -161705,6 +163412,7 @@ static i64 readInt64(u8 *p){
(((i64)p[6]) << 8) +
(((i64)p[7]) << 0)
);
+#endif
}
/*
@@ -161712,23 +163420,43 @@ static i64 readInt64(u8 *p){
** 64 bit integer. The value returned is the number of bytes written
** to the argument buffer (always 2, 4 and 8 respectively).
*/
-static int writeInt16(u8 *p, int i){
+static void writeInt16(u8 *p, int i){
p[0] = (i>> 8)&0xFF;
p[1] = (i>> 0)&0xFF;
- return 2;
}
static int writeCoord(u8 *p, RtreeCoord *pCoord){
u32 i;
+ assert( ((((char*)p) - (char*)0)&3)==0 ); /* p is always 4-byte aligned */
assert( sizeof(RtreeCoord)==4 );
assert( sizeof(u32)==4 );
+#if SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000)
+ i = __builtin_bswap32(pCoord->u);
+ memcpy(p, &i, 4);
+#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+ i = _byteswap_ulong(pCoord->u);
+ memcpy(p, &i, 4);
+#elif SQLITE_BYTEORDER==4321
+ i = pCoord->u;
+ memcpy(p, &i, 4);
+#else
i = pCoord->u;
p[0] = (i>>24)&0xFF;
p[1] = (i>>16)&0xFF;
p[2] = (i>> 8)&0xFF;
p[3] = (i>> 0)&0xFF;
+#endif
return 4;
}
static int writeInt64(u8 *p, i64 i){
+#if SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000)
+ i = (i64)__builtin_bswap64((u64)i);
+ memcpy(p, &i, 8);
+#elif SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+ i = (i64)_byteswap_uint64((u64)i);
+ memcpy(p, &i, 8);
+#elif SQLITE_BYTEORDER==4321
+ memcpy(p, &i, 8);
+#else
p[0] = (i>>56)&0xFF;
p[1] = (i>>48)&0xFF;
p[2] = (i>>40)&0xFF;
@@ -161737,6 +163465,7 @@ static int writeInt64(u8 *p, i64 i){
p[5] = (i>>16)&0xFF;
p[6] = (i>> 8)&0xFF;
p[7] = (i>> 0)&0xFF;
+#endif
return 8;
}
@@ -161820,6 +163549,17 @@ static RtreeNode *nodeNew(Rtree *pRtree, RtreeNode *pParent){
}
/*
+** Clear the Rtree.pNodeBlob object
+*/
+static void nodeBlobReset(Rtree *pRtree){
+ if( pRtree->pNodeBlob && pRtree->inWrTrans==0 && pRtree->nCursor==0 ){
+ sqlite3_blob *pBlob = pRtree->pNodeBlob;
+ pRtree->pNodeBlob = 0;
+ sqlite3_blob_close(pBlob);
+ }
+}
+
+/*
** Obtain a reference to an r-tree node.
*/
static int nodeAcquire(
@@ -161828,9 +163568,8 @@ static int nodeAcquire(
RtreeNode *pParent, /* Either the parent node or NULL */
RtreeNode **ppNode /* OUT: Acquired node */
){
- int rc;
- int rc2 = SQLITE_OK;
- RtreeNode *pNode;
+ int rc = SQLITE_OK;
+ RtreeNode *pNode = 0;
/* Check if the requested node is already in the hash table. If so,
** increase its reference count and return it.
@@ -161846,28 +163585,45 @@ static int nodeAcquire(
return SQLITE_OK;
}
- sqlite3_bind_int64(pRtree->pReadNode, 1, iNode);
- rc = sqlite3_step(pRtree->pReadNode);
- if( rc==SQLITE_ROW ){
- const u8 *zBlob = sqlite3_column_blob(pRtree->pReadNode, 0);
- if( pRtree->iNodeSize==sqlite3_column_bytes(pRtree->pReadNode, 0) ){
- pNode = (RtreeNode *)sqlite3_malloc(sizeof(RtreeNode)+pRtree->iNodeSize);
- if( !pNode ){
- rc2 = SQLITE_NOMEM;
- }else{
- pNode->pParent = pParent;
- pNode->zData = (u8 *)&pNode[1];
- pNode->nRef = 1;
- pNode->iNode = iNode;
- pNode->isDirty = 0;
- pNode->pNext = 0;
- memcpy(pNode->zData, zBlob, pRtree->iNodeSize);
- nodeReference(pParent);
- }
+ if( pRtree->pNodeBlob ){
+ sqlite3_blob *pBlob = pRtree->pNodeBlob;
+ pRtree->pNodeBlob = 0;
+ rc = sqlite3_blob_reopen(pBlob, iNode);
+ pRtree->pNodeBlob = pBlob;
+ if( rc ){
+ nodeBlobReset(pRtree);
+ if( rc==SQLITE_NOMEM ) return SQLITE_NOMEM;
+ }
+ }
+ if( pRtree->pNodeBlob==0 ){
+ char *zTab = sqlite3_mprintf("%s_node", pRtree->zName);
+ if( zTab==0 ) return SQLITE_NOMEM;
+ rc = sqlite3_blob_open(pRtree->db, pRtree->zDb, zTab, "data", iNode, 0,
+ &pRtree->pNodeBlob);
+ sqlite3_free(zTab);
+ }
+ if( rc ){
+ nodeBlobReset(pRtree);
+ *ppNode = 0;
+ /* If unable to open an sqlite3_blob on the desired row, that can only
+ ** be because the shadow tables hold erroneous data. */
+ if( rc==SQLITE_ERROR ) rc = SQLITE_CORRUPT_VTAB;
+ }else if( pRtree->iNodeSize==sqlite3_blob_bytes(pRtree->pNodeBlob) ){
+ pNode = (RtreeNode *)sqlite3_malloc(sizeof(RtreeNode)+pRtree->iNodeSize);
+ if( !pNode ){
+ rc = SQLITE_NOMEM;
+ }else{
+ pNode->pParent = pParent;
+ pNode->zData = (u8 *)&pNode[1];
+ pNode->nRef = 1;
+ pNode->iNode = iNode;
+ pNode->isDirty = 0;
+ pNode->pNext = 0;
+ rc = sqlite3_blob_read(pRtree->pNodeBlob, pNode->zData,
+ pRtree->iNodeSize, 0);
+ nodeReference(pParent);
}
}
- rc = sqlite3_reset(pRtree->pReadNode);
- if( rc==SQLITE_OK ) rc = rc2;
/* If the root node was just loaded, set pRtree->iDepth to the height
** of the r-tree structure. A height of zero means all data is stored on
@@ -161919,7 +163675,7 @@ static void nodeOverwriteCell(
int ii;
u8 *p = &pNode->zData[4 + pRtree->nBytesPerCell*iCell];
p += writeInt64(p, pCell->iRowid);
- for(ii=0; ii<(pRtree->nDim*2); ii++){
+ for(ii=0; ii<pRtree->nDim2; ii++){
p += writeCoord(p, &pCell->aCoord[ii]);
}
pNode->isDirty = 1;
@@ -162053,13 +163809,16 @@ static void nodeGetCell(
){
u8 *pData;
RtreeCoord *pCoord;
- int ii;
+ int ii = 0;
pCell->iRowid = nodeGetRowid(pRtree, pNode, iCell);
pData = pNode->zData + (12 + pRtree->nBytesPerCell*iCell);
pCoord = pCell->aCoord;
- for(ii=0; ii<pRtree->nDim*2; ii++){
- readCoord(&pData[ii*4], &pCoord[ii]);
- }
+ do{
+ readCoord(pData, &pCoord[ii]);
+ readCoord(pData+4, &pCoord[ii+1]);
+ pData += 8;
+ ii += 2;
+ }while( ii<pRtree->nDim2 );
}
@@ -162110,7 +163869,9 @@ static void rtreeReference(Rtree *pRtree){
static void rtreeRelease(Rtree *pRtree){
pRtree->nBusy--;
if( pRtree->nBusy==0 ){
- sqlite3_finalize(pRtree->pReadNode);
+ pRtree->inWrTrans = 0;
+ pRtree->nCursor = 0;
+ nodeBlobReset(pRtree);
sqlite3_finalize(pRtree->pWriteNode);
sqlite3_finalize(pRtree->pDeleteNode);
sqlite3_finalize(pRtree->pReadRowid);
@@ -162148,6 +163909,7 @@ static int rtreeDestroy(sqlite3_vtab *pVtab){
if( !zCreate ){
rc = SQLITE_NOMEM;
}else{
+ nodeBlobReset(pRtree);
rc = sqlite3_exec(pRtree->db, zCreate, 0, 0, 0);
sqlite3_free(zCreate);
}
@@ -162163,6 +163925,7 @@ static int rtreeDestroy(sqlite3_vtab *pVtab){
*/
static int rtreeOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){
int rc = SQLITE_NOMEM;
+ Rtree *pRtree = (Rtree *)pVTab;
RtreeCursor *pCsr;
pCsr = (RtreeCursor *)sqlite3_malloc(sizeof(RtreeCursor));
@@ -162170,6 +163933,7 @@ static int rtreeOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor){
memset(pCsr, 0, sizeof(RtreeCursor));
pCsr->base.pVtab = pVTab;
rc = SQLITE_OK;
+ pRtree->nCursor++;
}
*ppCursor = (sqlite3_vtab_cursor *)pCsr;
@@ -162202,10 +163966,13 @@ static int rtreeClose(sqlite3_vtab_cursor *cur){
Rtree *pRtree = (Rtree *)(cur->pVtab);
int ii;
RtreeCursor *pCsr = (RtreeCursor *)cur;
+ assert( pRtree->nCursor>0 );
freeCursorConstraints(pCsr);
sqlite3_free(pCsr->aPoint);
for(ii=0; ii<RTREE_CACHE_SZ; ii++) nodeRelease(pRtree, pCsr->aNode[ii]);
sqlite3_free(pCsr);
+ pRtree->nCursor--;
+ nodeBlobReset(pRtree);
return SQLITE_OK;
}
@@ -162228,15 +163995,22 @@ static int rtreeEof(sqlite3_vtab_cursor *cur){
** false. a[] is the four bytes of the on-disk record to be decoded.
** Store the results in "r".
**
-** There are three versions of this macro, one each for little-endian and
-** big-endian processors and a third generic implementation. The endian-
-** specific implementations are much faster and are preferred if the
-** processor endianness is known at compile-time. The SQLITE_BYTEORDER
-** macro is part of sqliteInt.h and hence the endian-specific
-** implementation will only be used if this module is compiled as part
-** of the amalgamation.
+** There are five versions of this macro. The last one is generic. The
+** other four are various architecture-specific optimizations.
*/
-#if defined(SQLITE_BYTEORDER) && SQLITE_BYTEORDER==1234
+#if SQLITE_BYTEORDER==1234 && MSVC_VERSION>=1300
+#define RTREE_DECODE_COORD(eInt, a, r) { \
+ RtreeCoord c; /* Coordinate decoded */ \
+ c.u = _byteswap_ulong(*(u32*)a); \
+ r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
+}
+#elif SQLITE_BYTEORDER==1234 && (GCC_VERSION>=4003000 || CLANG_VERSION>=3000000)
+#define RTREE_DECODE_COORD(eInt, a, r) { \
+ RtreeCoord c; /* Coordinate decoded */ \
+ c.u = __builtin_bswap32(*(u32*)a); \
+ r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
+}
+#elif SQLITE_BYTEORDER==1234
#define RTREE_DECODE_COORD(eInt, a, r) { \
RtreeCoord c; /* Coordinate decoded */ \
memcpy(&c.u,a,4); \
@@ -162244,7 +164018,7 @@ static int rtreeEof(sqlite3_vtab_cursor *cur){
((c.u&0xff)<<24)|((c.u&0xff00)<<8); \
r = eInt ? (sqlite3_rtree_dbl)c.i : (sqlite3_rtree_dbl)c.f; \
}
-#elif defined(SQLITE_BYTEORDER) && SQLITE_BYTEORDER==4321
+#elif SQLITE_BYTEORDER==4321
#define RTREE_DECODE_COORD(eInt, a, r) { \
RtreeCoord c; /* Coordinate decoded */ \
memcpy(&c.u,a,4); \
@@ -162271,10 +164045,10 @@ static int rtreeCallbackConstraint(
sqlite3_rtree_dbl *prScore, /* OUT: score for the cell */
int *peWithin /* OUT: visibility of the cell */
){
- int i; /* Loop counter */
sqlite3_rtree_query_info *pInfo = pConstraint->pInfo; /* Callback info */
int nCoord = pInfo->nCoord; /* No. of coordinates */
int rc; /* Callback return code */
+ RtreeCoord c; /* Translator union */
sqlite3_rtree_dbl aCoord[RTREE_MAX_DIMENSIONS*2]; /* Decoded coordinates */
assert( pConstraint->op==RTREE_MATCH || pConstraint->op==RTREE_QUERY );
@@ -162284,13 +164058,41 @@ static int rtreeCallbackConstraint(
pInfo->iRowid = readInt64(pCellData);
}
pCellData += 8;
- for(i=0; i<nCoord; i++, pCellData += 4){
- RTREE_DECODE_COORD(eInt, pCellData, aCoord[i]);
+#ifndef SQLITE_RTREE_INT_ONLY
+ if( eInt==0 ){
+ switch( nCoord ){
+ case 10: readCoord(pCellData+36, &c); aCoord[9] = c.f;
+ readCoord(pCellData+32, &c); aCoord[8] = c.f;
+ case 8: readCoord(pCellData+28, &c); aCoord[7] = c.f;
+ readCoord(pCellData+24, &c); aCoord[6] = c.f;
+ case 6: readCoord(pCellData+20, &c); aCoord[5] = c.f;
+ readCoord(pCellData+16, &c); aCoord[4] = c.f;
+ case 4: readCoord(pCellData+12, &c); aCoord[3] = c.f;
+ readCoord(pCellData+8, &c); aCoord[2] = c.f;
+ default: readCoord(pCellData+4, &c); aCoord[1] = c.f;
+ readCoord(pCellData, &c); aCoord[0] = c.f;
+ }
+ }else
+#endif
+ {
+ switch( nCoord ){
+ case 10: readCoord(pCellData+36, &c); aCoord[9] = c.i;
+ readCoord(pCellData+32, &c); aCoord[8] = c.i;
+ case 8: readCoord(pCellData+28, &c); aCoord[7] = c.i;
+ readCoord(pCellData+24, &c); aCoord[6] = c.i;
+ case 6: readCoord(pCellData+20, &c); aCoord[5] = c.i;
+ readCoord(pCellData+16, &c); aCoord[4] = c.i;
+ case 4: readCoord(pCellData+12, &c); aCoord[3] = c.i;
+ readCoord(pCellData+8, &c); aCoord[2] = c.i;
+ default: readCoord(pCellData+4, &c); aCoord[1] = c.i;
+ readCoord(pCellData, &c); aCoord[0] = c.i;
+ }
}
if( pConstraint->op==RTREE_MATCH ){
+ int eWithin = 0;
rc = pConstraint->u.xGeom((sqlite3_rtree_geometry*)pInfo,
- nCoord, aCoord, &i);
- if( i==0 ) *peWithin = NOT_WITHIN;
+ nCoord, aCoord, &eWithin);
+ if( eWithin==0 ) *peWithin = NOT_WITHIN;
*prScore = RTREE_ZERO;
}else{
pInfo->aCoord = aCoord;
@@ -162326,6 +164128,7 @@ static void rtreeNonleafConstraint(
assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE
|| p->op==RTREE_GT || p->op==RTREE_EQ );
+ assert( ((((char*)pCellData) - (char*)0)&3)==0 ); /* 4-byte aligned */
switch( p->op ){
case RTREE_LE:
case RTREE_LT:
@@ -162366,6 +164169,7 @@ static void rtreeLeafConstraint(
assert(p->op==RTREE_LE || p->op==RTREE_LT || p->op==RTREE_GE
|| p->op==RTREE_GT || p->op==RTREE_EQ );
pCellData += 8 + p->iCoord*4;
+ assert( ((((char*)pCellData) - (char*)0)&3)==0 ); /* 4-byte aligned */
RTREE_DECODE_COORD(eInt, pCellData, xN);
switch( p->op ){
case RTREE_LE: if( xN <= p->u.rValue ) return; break;
@@ -162434,7 +164238,7 @@ static int rtreeSearchPointCompare(
}
/*
-** Interchange to search points in a cursor.
+** Interchange two search points in a cursor.
*/
static void rtreeSearchPointSwap(RtreeCursor *p, int i, int j){
RtreeSearchPoint t = p->aPoint[i];
@@ -162682,7 +164486,7 @@ static int rtreeStepToLeaf(RtreeCursor *pCur){
if( rScore<RTREE_ZERO ) rScore = RTREE_ZERO;
p = rtreeSearchPointNew(pCur, rScore, x.iLevel);
if( p==0 ) return SQLITE_NOMEM;
- p->eWithin = eWithin;
+ p->eWithin = (u8)eWithin;
p->id = x.id;
p->iCell = x.iCell;
RTREE_QUEUE_TRACE(pCur, "PUSH-S:");
@@ -162741,7 +164545,6 @@ static int rtreeColumn(sqlite3_vtab_cursor *cur, sqlite3_context *ctx, int i){
if( i==0 ){
sqlite3_result_int64(ctx, nodeGetRowid(pRtree, pNode, p->iCell));
}else{
- if( rc ) return rc;
nodeGetCoord(pRtree, pNode, p->iCell, i-1, &c);
#ifndef SQLITE_RTREE_INT_ONLY
if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
@@ -162870,7 +164673,7 @@ static int rtreeFilter(
p->id = iNode;
p->eWithin = PARTLY_WITHIN;
rc = nodeRowidIndex(pRtree, pLeaf, iRowid, &iCell);
- p->iCell = iCell;
+ p->iCell = (u8)iCell;
RTREE_QUEUE_TRACE(pCsr, "PUSH-F1:");
}else{
pCsr->atEOF = 1;
@@ -162903,7 +164706,7 @@ static int rtreeFilter(
if( rc!=SQLITE_OK ){
break;
}
- p->pInfo->nCoord = pRtree->nDim*2;
+ p->pInfo->nCoord = pRtree->nDim2;
p->pInfo->anQueue = pCsr->anQueue;
p->pInfo->mxLevel = pRtree->iDepth + 1;
}else{
@@ -162918,7 +164721,7 @@ static int rtreeFilter(
}
if( rc==SQLITE_OK ){
RtreeSearchPoint *pNew;
- pNew = rtreeSearchPointNew(pCsr, RTREE_ZERO, pRtree->iDepth+1);
+ pNew = rtreeSearchPointNew(pCsr, RTREE_ZERO, (u8)(pRtree->iDepth+1));
if( pNew==0 ) return SQLITE_NOMEM;
pNew->id = 1;
pNew->iCell = 0;
@@ -162937,19 +164740,6 @@ static int rtreeFilter(
}
/*
-** Set the pIdxInfo->estimatedRows variable to nRow. Unless this
-** extension is currently being used by a version of SQLite too old to
-** support estimatedRows. In that case this function is a no-op.
-*/
-static void setEstimatedRows(sqlite3_index_info *pIdxInfo, i64 nRow){
-#if SQLITE_VERSION_NUMBER>=3008002
- if( sqlite3_libversion_number()>=3008002 ){
- pIdxInfo->estimatedRows = nRow;
- }
-#endif
-}
-
-/*
** Rtree virtual table module xBestIndex method. There are three
** table scan strategies to choose from (in order from most to
** least desirable):
@@ -163028,7 +164818,7 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
** a single row.
*/
pIdxInfo->estimatedCost = 30.0;
- setEstimatedRows(pIdxInfo, 1);
+ pIdxInfo->estimatedRows = 1;
return SQLITE_OK;
}
@@ -163046,7 +164836,7 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
break;
}
zIdxStr[iIdx++] = op;
- zIdxStr[iIdx++] = p->iColumn - 1 + '0';
+ zIdxStr[iIdx++] = (char)(p->iColumn - 1 + '0');
pIdxInfo->aConstraintUsage[ii].argvIndex = (iIdx/2);
pIdxInfo->aConstraintUsage[ii].omit = 1;
}
@@ -163060,7 +164850,7 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
nRow = pRtree->nRowEst >> (iIdx/2);
pIdxInfo->estimatedCost = (double)6.0 * (double)nRow;
- setEstimatedRows(pIdxInfo, nRow);
+ pIdxInfo->estimatedRows = nRow;
return rc;
}
@@ -163070,9 +164860,26 @@ static int rtreeBestIndex(sqlite3_vtab *tab, sqlite3_index_info *pIdxInfo){
*/
static RtreeDValue cellArea(Rtree *pRtree, RtreeCell *p){
RtreeDValue area = (RtreeDValue)1;
- int ii;
- for(ii=0; ii<(pRtree->nDim*2); ii+=2){
- area = (area * (DCOORD(p->aCoord[ii+1]) - DCOORD(p->aCoord[ii])));
+ assert( pRtree->nDim>=1 && pRtree->nDim<=5 );
+#ifndef SQLITE_RTREE_INT_ONLY
+ if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
+ switch( pRtree->nDim ){
+ case 5: area = p->aCoord[9].f - p->aCoord[8].f;
+ case 4: area *= p->aCoord[7].f - p->aCoord[6].f;
+ case 3: area *= p->aCoord[5].f - p->aCoord[4].f;
+ case 2: area *= p->aCoord[3].f - p->aCoord[2].f;
+ default: area *= p->aCoord[1].f - p->aCoord[0].f;
+ }
+ }else
+#endif
+ {
+ switch( pRtree->nDim ){
+ case 5: area = p->aCoord[9].i - p->aCoord[8].i;
+ case 4: area *= p->aCoord[7].i - p->aCoord[6].i;
+ case 3: area *= p->aCoord[5].i - p->aCoord[4].i;
+ case 2: area *= p->aCoord[3].i - p->aCoord[2].i;
+ default: area *= p->aCoord[1].i - p->aCoord[0].i;
+ }
}
return area;
}
@@ -163082,11 +164889,12 @@ static RtreeDValue cellArea(Rtree *pRtree, RtreeCell *p){
** of the object's size in each dimension.
*/
static RtreeDValue cellMargin(Rtree *pRtree, RtreeCell *p){
- RtreeDValue margin = (RtreeDValue)0;
- int ii;
- for(ii=0; ii<(pRtree->nDim*2); ii+=2){
+ RtreeDValue margin = 0;
+ int ii = pRtree->nDim2 - 2;
+ do{
margin += (DCOORD(p->aCoord[ii+1]) - DCOORD(p->aCoord[ii]));
- }
+ ii -= 2;
+ }while( ii>=0 );
return margin;
}
@@ -163094,17 +164902,19 @@ static RtreeDValue cellMargin(Rtree *pRtree, RtreeCell *p){
** Store the union of cells p1 and p2 in p1.
*/
static void cellUnion(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){
- int ii;
+ int ii = 0;
if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
- for(ii=0; ii<(pRtree->nDim*2); ii+=2){
+ do{
p1->aCoord[ii].f = MIN(p1->aCoord[ii].f, p2->aCoord[ii].f);
p1->aCoord[ii+1].f = MAX(p1->aCoord[ii+1].f, p2->aCoord[ii+1].f);
- }
+ ii += 2;
+ }while( ii<pRtree->nDim2 );
}else{
- for(ii=0; ii<(pRtree->nDim*2); ii+=2){
+ do{
p1->aCoord[ii].i = MIN(p1->aCoord[ii].i, p2->aCoord[ii].i);
p1->aCoord[ii+1].i = MAX(p1->aCoord[ii+1].i, p2->aCoord[ii+1].i);
- }
+ ii += 2;
+ }while( ii<pRtree->nDim2 );
}
}
@@ -163115,7 +164925,7 @@ static void cellUnion(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){
static int cellContains(Rtree *pRtree, RtreeCell *p1, RtreeCell *p2){
int ii;
int isInt = (pRtree->eCoordType==RTREE_COORD_INT32);
- for(ii=0; ii<(pRtree->nDim*2); ii+=2){
+ for(ii=0; ii<pRtree->nDim2; ii+=2){
RtreeCoord *a1 = &p1->aCoord[ii];
RtreeCoord *a2 = &p2->aCoord[ii];
if( (!isInt && (a2[0].f<a1[0].f || a2[1].f>a1[1].f))
@@ -163150,7 +164960,7 @@ static RtreeDValue cellOverlap(
for(ii=0; ii<nCell; ii++){
int jj;
RtreeDValue o = (RtreeDValue)1;
- for(jj=0; jj<(pRtree->nDim*2); jj+=2){
+ for(jj=0; jj<pRtree->nDim2; jj+=2){
RtreeDValue x1, x2;
x1 = MAX(DCOORD(p->aCoord[jj]), DCOORD(aCell[ii].aCoord[jj]));
x2 = MIN(DCOORD(p->aCoord[jj+1]), DCOORD(aCell[ii].aCoord[jj+1]));
@@ -164206,7 +166016,7 @@ static int rtreeUpdate(
** This problem was discovered after years of use, so we silently ignore
** these kinds of misdeclared tables to avoid breaking any legacy.
*/
- assert( nData<=(pRtree->nDim*2 + 3) );
+ assert( nData<=(pRtree->nDim2 + 3) );
#ifndef SQLITE_RTREE_INT_ONLY
if( pRtree->eCoordType==RTREE_COORD_REAL32 ){
@@ -164297,6 +166107,27 @@ constraint:
}
/*
+** Called when a transaction starts.
+*/
+static int rtreeBeginTransaction(sqlite3_vtab *pVtab){
+ Rtree *pRtree = (Rtree *)pVtab;
+ assert( pRtree->inWrTrans==0 );
+ pRtree->inWrTrans++;
+ return SQLITE_OK;
+}
+
+/*
+** Called when a transaction completes (either by COMMIT or ROLLBACK).
+** The sqlite3_blob object should be released at this point.
+*/
+static int rtreeEndTransaction(sqlite3_vtab *pVtab){
+ Rtree *pRtree = (Rtree *)pVtab;
+ pRtree->inWrTrans = 0;
+ nodeBlobReset(pRtree);
+ return SQLITE_OK;
+}
+
+/*
** The xRename method for rtree module virtual tables.
*/
static int rtreeRename(sqlite3_vtab *pVtab, const char *zNewName){
@@ -164317,6 +166148,7 @@ static int rtreeRename(sqlite3_vtab *pVtab, const char *zNewName){
return rc;
}
+
/*
** This function populates the pRtree->nRowEst variable with an estimate
** of the number of rows in the virtual table. If possible, this is based
@@ -164376,15 +166208,15 @@ static sqlite3_module rtreeModule = {
rtreeColumn, /* xColumn - read data */
rtreeRowid, /* xRowid - read data */
rtreeUpdate, /* xUpdate - write data */
- 0, /* xBegin - begin transaction */
- 0, /* xSync - sync transaction */
- 0, /* xCommit - commit transaction */
- 0, /* xRollback - rollback transaction */
+ rtreeBeginTransaction, /* xBegin - begin transaction */
+ rtreeEndTransaction, /* xSync - sync transaction */
+ rtreeEndTransaction, /* xCommit - commit transaction */
+ rtreeEndTransaction, /* xRollback - rollback transaction */
0, /* xFindFunction - function overloading */
rtreeRename, /* xRename - rename the table */
0, /* xSavepoint */
0, /* xRelease */
- 0 /* xRollbackTo */
+ 0, /* xRollbackTo */
};
static int rtreeSqlInit(
@@ -164396,10 +166228,9 @@ static int rtreeSqlInit(
){
int rc = SQLITE_OK;
- #define N_STATEMENT 9
+ #define N_STATEMENT 8
static const char *azSql[N_STATEMENT] = {
- /* Read and write the xxx_node table */
- "SELECT data FROM '%q'.'%q_node' WHERE nodeno = :1",
+ /* Write the xxx_node table */
"INSERT OR REPLACE INTO '%q'.'%q_node' VALUES(:1, :2)",
"DELETE FROM '%q'.'%q_node' WHERE nodeno = :1",
@@ -164437,15 +166268,14 @@ static int rtreeSqlInit(
}
}
- appStmt[0] = &pRtree->pReadNode;
- appStmt[1] = &pRtree->pWriteNode;
- appStmt[2] = &pRtree->pDeleteNode;
- appStmt[3] = &pRtree->pReadRowid;
- appStmt[4] = &pRtree->pWriteRowid;
- appStmt[5] = &pRtree->pDeleteRowid;
- appStmt[6] = &pRtree->pReadParent;
- appStmt[7] = &pRtree->pWriteParent;
- appStmt[8] = &pRtree->pDeleteParent;
+ appStmt[0] = &pRtree->pWriteNode;
+ appStmt[1] = &pRtree->pDeleteNode;
+ appStmt[2] = &pRtree->pReadRowid;
+ appStmt[3] = &pRtree->pWriteRowid;
+ appStmt[4] = &pRtree->pDeleteRowid;
+ appStmt[5] = &pRtree->pReadParent;
+ appStmt[6] = &pRtree->pWriteParent;
+ appStmt[7] = &pRtree->pDeleteParent;
rc = rtreeQueryStat1(db, pRtree);
for(i=0; i<N_STATEMENT && rc==SQLITE_OK; i++){
@@ -164583,9 +166413,10 @@ static int rtreeInit(
pRtree->base.pModule = &rtreeModule;
pRtree->zDb = (char *)&pRtree[1];
pRtree->zName = &pRtree->zDb[nDb+1];
- pRtree->nDim = (argc-4)/2;
- pRtree->nBytesPerCell = 8 + pRtree->nDim*4*2;
- pRtree->eCoordType = eCoordType;
+ pRtree->nDim = (u8)((argc-4)/2);
+ pRtree->nDim2 = pRtree->nDim*2;
+ pRtree->nBytesPerCell = 8 + pRtree->nDim2*4;
+ pRtree->eCoordType = (u8)eCoordType;
memcpy(pRtree->zDb, argv[1], nDb);
memcpy(pRtree->zName, argv[2], nName);
@@ -164658,7 +166489,8 @@ static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
UNUSED_PARAMETER(nArg);
memset(&node, 0, sizeof(RtreeNode));
memset(&tree, 0, sizeof(Rtree));
- tree.nDim = sqlite3_value_int(apArg[0]);
+ tree.nDim = (u8)sqlite3_value_int(apArg[0]);
+ tree.nDim2 = tree.nDim*2;
tree.nBytesPerCell = 8 + 8 * tree.nDim;
node.zData = (u8 *)sqlite3_value_blob(apArg[1]);
@@ -164671,7 +166503,7 @@ static void rtreenode(sqlite3_context *ctx, int nArg, sqlite3_value **apArg){
nodeGetCell(&tree, &node, ii, &cell);
sqlite3_snprintf(512-nCell,&zCell[nCell],"%lld", cell.iRowid);
nCell = (int)strlen(zCell);
- for(jj=0; jj<tree.nDim*2; jj++){
+ for(jj=0; jj<tree.nDim2; jj++){
#ifndef SQLITE_RTREE_INT_ONLY
sqlite3_snprintf(512-nCell,&zCell[nCell], " %g",
(double)cell.aCoord[jj].f);
@@ -165379,38 +167211,36 @@ static void icuLoadCollation(
** Register the ICU extension functions with database db.
*/
SQLITE_PRIVATE int sqlite3IcuInit(sqlite3 *db){
- struct IcuScalar {
+ static const struct IcuScalar {
const char *zName; /* Function name */
- int nArg; /* Number of arguments */
- int enc; /* Optimal text encoding */
- void *pContext; /* sqlite3_user_data() context */
+ unsigned char nArg; /* Number of arguments */
+ unsigned short enc; /* Optimal text encoding */
+ unsigned char iContext; /* sqlite3_user_data() context */
void (*xFunc)(sqlite3_context*,int,sqlite3_value**);
} scalars[] = {
- {"regexp", 2, SQLITE_ANY, 0, icuRegexpFunc},
-
- {"lower", 1, SQLITE_UTF16, 0, icuCaseFunc16},
- {"lower", 2, SQLITE_UTF16, 0, icuCaseFunc16},
- {"upper", 1, SQLITE_UTF16, (void*)1, icuCaseFunc16},
- {"upper", 2, SQLITE_UTF16, (void*)1, icuCaseFunc16},
-
- {"lower", 1, SQLITE_UTF8, 0, icuCaseFunc16},
- {"lower", 2, SQLITE_UTF8, 0, icuCaseFunc16},
- {"upper", 1, SQLITE_UTF8, (void*)1, icuCaseFunc16},
- {"upper", 2, SQLITE_UTF8, (void*)1, icuCaseFunc16},
-
- {"like", 2, SQLITE_UTF8, 0, icuLikeFunc},
- {"like", 3, SQLITE_UTF8, 0, icuLikeFunc},
-
- {"icu_load_collation", 2, SQLITE_UTF8, (void*)db, icuLoadCollation},
+ {"icu_load_collation", 2, SQLITE_UTF8, 1, icuLoadCollation},
+ {"regexp", 2, SQLITE_ANY|SQLITE_DETERMINISTIC, 0, icuRegexpFunc},
+ {"lower", 1, SQLITE_UTF16|SQLITE_DETERMINISTIC, 0, icuCaseFunc16},
+ {"lower", 2, SQLITE_UTF16|SQLITE_DETERMINISTIC, 0, icuCaseFunc16},
+ {"upper", 1, SQLITE_UTF16|SQLITE_DETERMINISTIC, 1, icuCaseFunc16},
+ {"upper", 2, SQLITE_UTF16|SQLITE_DETERMINISTIC, 1, icuCaseFunc16},
+ {"lower", 1, SQLITE_UTF8|SQLITE_DETERMINISTIC, 0, icuCaseFunc16},
+ {"lower", 2, SQLITE_UTF8|SQLITE_DETERMINISTIC, 0, icuCaseFunc16},
+ {"upper", 1, SQLITE_UTF8|SQLITE_DETERMINISTIC, 1, icuCaseFunc16},
+ {"upper", 2, SQLITE_UTF8|SQLITE_DETERMINISTIC, 1, icuCaseFunc16},
+ {"like", 2, SQLITE_UTF8|SQLITE_DETERMINISTIC, 0, icuLikeFunc},
+ {"like", 3, SQLITE_UTF8|SQLITE_DETERMINISTIC, 0, icuLikeFunc},
};
-
int rc = SQLITE_OK;
int i;
+
for(i=0; rc==SQLITE_OK && i<(int)(sizeof(scalars)/sizeof(scalars[0])); i++){
- struct IcuScalar *p = &scalars[i];
+ const struct IcuScalar *p = &scalars[i];
rc = sqlite3_create_function(
- db, p->zName, p->nArg, p->enc, p->pContext, p->xFunc, 0, 0
+ db, p->zName, p->nArg, p->enc,
+ p->iContext ? (void*)db : (void*)0,
+ p->xFunc, 0, 0
);
}
@@ -168618,7 +170448,7 @@ static RbuState *rbuLoadState(sqlite3rbu *p){
** Open the database handle and attach the RBU database as "rbu". If an
** error occurs, leave an error code and message in the RBU handle.
*/
-static void rbuOpenDatabase(sqlite3rbu *p){
+static void rbuOpenDatabase(sqlite3rbu *p, int *pbRetry){
assert( p->rc || (p->dbMain==0 && p->dbRbu==0) );
assert( p->rc || rbuIsVacuum(p) || p->zTarget!=0 );
@@ -168693,7 +170523,7 @@ static void rbuOpenDatabase(sqlite3rbu *p){
}else{
RbuState *pState = rbuLoadState(p);
if( pState ){
- bOpen = (pState->eStage>RBU_STAGE_MOVE);
+ bOpen = (pState->eStage>=RBU_STAGE_MOVE);
rbuFreeState(pState);
}
}
@@ -168705,6 +170535,15 @@ static void rbuOpenDatabase(sqlite3rbu *p){
if( !rbuIsVacuum(p) ){
p->dbMain = rbuOpenDbhandle(p, p->zTarget, 1);
}else if( p->pRbuFd->pWalFd ){
+ if( pbRetry ){
+ p->pRbuFd->bNolock = 0;
+ sqlite3_close(p->dbRbu);
+ sqlite3_close(p->dbMain);
+ p->dbMain = 0;
+ p->dbRbu = 0;
+ *pbRetry = 1;
+ return;
+ }
p->rc = SQLITE_ERROR;
p->zErrmsg = sqlite3_mprintf("cannot vacuum wal mode database");
}else{
@@ -168885,16 +170724,18 @@ static void rbuSetupCheckpoint(sqlite3rbu *p, RbuState *pState){
if( rc2!=SQLITE_INTERNAL ) p->rc = rc2;
}
- if( p->rc==SQLITE_OK ){
+ if( p->rc==SQLITE_OK && p->nFrame>0 ){
p->eStage = RBU_STAGE_CKPT;
p->nStep = (pState ? pState->nRow : 0);
p->aBuf = rbuMalloc(p, p->pgsz);
p->iWalCksum = rbuShmChecksum(p);
}
- if( p->rc==SQLITE_OK && pState && pState->iWalCksum!=p->iWalCksum ){
- p->rc = SQLITE_DONE;
- p->eStage = RBU_STAGE_DONE;
+ if( p->rc==SQLITE_OK ){
+ if( p->nFrame==0 || (pState && pState->iWalCksum!=p->iWalCksum) ){
+ p->rc = SQLITE_DONE;
+ p->eStage = RBU_STAGE_DONE;
+ }
}
}
@@ -169067,7 +170908,7 @@ static void rbuMoveOalFile(sqlite3rbu *p){
#endif
if( p->rc==SQLITE_OK ){
- rbuOpenDatabase(p);
+ rbuOpenDatabase(p, 0);
rbuSetupCheckpoint(p, 0);
}
}
@@ -169778,6 +171619,7 @@ static sqlite3rbu *openRbuHandle(
/* Open the target, RBU and state databases */
if( p->rc==SQLITE_OK ){
char *pCsr = (char*)&p[1];
+ int bRetry = 0;
if( zTarget ){
p->zTarget = pCsr;
memcpy(p->zTarget, zTarget, nTarget+1);
@@ -169789,7 +171631,18 @@ static sqlite3rbu *openRbuHandle(
if( zState ){
p->zState = rbuMPrintf(p, "%s", zState);
}
- rbuOpenDatabase(p);
+
+ /* If the first attempt to open the database file fails and the bRetry
+  ** flag is set, this means that the db was not opened because it seemed
+ ** to be a wal-mode db. But, this may have happened due to an earlier
+ ** RBU vacuum operation leaving an old wal file in the directory.
+ ** If this is the case, it will have been checkpointed and deleted
+ ** when the handle was closed and a second attempt to open the
+ ** database may succeed. */
+ rbuOpenDatabase(p, &bRetry);
+ if( bRetry ){
+ rbuOpenDatabase(p, 0);
+ }
}
if( p->rc==SQLITE_OK ){
@@ -172098,9 +173951,7 @@ static int sessionSerializeValue(
if( aBuf ){
sessionVarintPut(&aBuf[1], n);
- memcpy(&aBuf[nVarint + 1], eType==SQLITE_TEXT ?
- sqlite3_value_text(pValue) : sqlite3_value_blob(pValue), n
- );
+ if( n ) memcpy(&aBuf[nVarint + 1], z, n);
}
nByte = 1 + nVarint + n;
@@ -173516,7 +175367,7 @@ static void sessionAppendBlob(
int nBlob,
int *pRc
){
- if( 0==sessionBufferGrow(p, nBlob, pRc) ){
+ if( nBlob>0 && 0==sessionBufferGrow(p, nBlob, pRc) ){
memcpy(&p->aBuf[p->nBuf], aBlob, nBlob);
p->nBuf += nBlob;
}
@@ -173702,13 +175553,13 @@ static int sessionAppendUpdate(
}
default: {
- int nByte;
- int nHdr = 1 + sessionVarintGet(&pCsr[1], &nByte);
+ int n;
+ int nHdr = 1 + sessionVarintGet(&pCsr[1], &n);
assert( eType==SQLITE_TEXT || eType==SQLITE_BLOB );
- nAdvance = nHdr + nByte;
+ nAdvance = nHdr + n;
if( eType==sqlite3_column_type(pStmt, i)
- && nByte==sqlite3_column_bytes(pStmt, i)
- && 0==memcmp(&pCsr[nHdr], sqlite3_column_blob(pStmt, i), nByte)
+ && n==sqlite3_column_bytes(pStmt, i)
+ && (n==0 || 0==memcmp(&pCsr[nHdr], sqlite3_column_blob(pStmt, i), n))
){
break;
}
@@ -174754,7 +176605,7 @@ SQLITE_API int sqlite3changeset_conflict(
if( !pIter->pConflict ){
return SQLITE_MISUSE;
}
- if( iVal<0 || iVal>=sqlite3_column_count(pIter->pConflict) ){
+ if( iVal<0 || iVal>=pIter->nCol ){
return SQLITE_RANGE;
}
*ppValue = sqlite3_column_value(pIter->pConflict, iVal);
@@ -175221,7 +177072,13 @@ static int sessionInsertRow(
sessionAppendStr(&buf, "INSERT INTO main.", &rc);
sessionAppendIdent(&buf, zTab, &rc);
- sessionAppendStr(&buf, " VALUES(?", &rc);
+ sessionAppendStr(&buf, "(", &rc);
+ for(i=0; i<p->nCol; i++){
+ if( i!=0 ) sessionAppendStr(&buf, ", ", &rc);
+ sessionAppendIdent(&buf, p->azCol[i], &rc);
+ }
+
+ sessionAppendStr(&buf, ") VALUES(?", &rc);
for(i=1; i<p->nCol; i++){
sessionAppendStr(&buf, ", ?", &rc);
}
@@ -175767,11 +177624,17 @@ static int sessionChangesetApply(
nTab = (int)strlen(zTab);
sApply.azCol = (const char **)zTab;
}else{
+ int nMinCol = 0;
+ int i;
+
sqlite3changeset_pk(pIter, &abPK, 0);
rc = sessionTableInfo(
db, "main", zNew, &sApply.nCol, &zTab, &sApply.azCol, &sApply.abPK
);
if( rc!=SQLITE_OK ) break;
+ for(i=0; i<sApply.nCol; i++){
+ if( sApply.abPK[i] ) nMinCol = i+1;
+ }
if( sApply.nCol==0 ){
schemaMismatch = 1;
@@ -175779,26 +177642,29 @@ static int sessionChangesetApply(
"sqlite3changeset_apply(): no such table: %s", zTab
);
}
- else if( sApply.nCol!=nCol ){
+ else if( sApply.nCol<nCol ){
schemaMismatch = 1;
sqlite3_log(SQLITE_SCHEMA,
- "sqlite3changeset_apply(): table %s has %d columns, expected %d",
+ "sqlite3changeset_apply(): table %s has %d columns, "
+ "expected %d or more",
zTab, sApply.nCol, nCol
);
}
- else if( memcmp(sApply.abPK, abPK, nCol)!=0 ){
+ else if( nCol<nMinCol || memcmp(sApply.abPK, abPK, nCol)!=0 ){
schemaMismatch = 1;
sqlite3_log(SQLITE_SCHEMA, "sqlite3changeset_apply(): "
"primary key mismatch for table %s", zTab
);
}
- else if(
- (rc = sessionSelectRow(db, zTab, &sApply))
- || (rc = sessionUpdateRow(db, zTab, &sApply))
- || (rc = sessionDeleteRow(db, zTab, &sApply))
- || (rc = sessionInsertRow(db, zTab, &sApply))
- ){
- break;
+ else{
+ sApply.nCol = nCol;
+ if((rc = sessionSelectRow(db, zTab, &sApply))
+ || (rc = sessionUpdateRow(db, zTab, &sApply))
+ || (rc = sessionDeleteRow(db, zTab, &sApply))
+ || (rc = sessionInsertRow(db, zTab, &sApply))
+ ){
+ break;
+ }
}
nTab = sqlite3Strlen30(zTab);
}
@@ -176390,7 +178256,7 @@ SQLITE_API int sqlite3changeset_concat_strm(
** how JSONB might improve on that.)
*/
#if !defined(SQLITE_CORE) || defined(SQLITE_ENABLE_JSON1)
-#if !defined(_SQLITEINT_H_)
+#if !defined(SQLITEINT_H)
/* #include "sqlite3ext.h" */
#endif
SQLITE_EXTENSION_INIT1
@@ -176417,13 +178283,15 @@ SQLITE_EXTENSION_INIT1
#ifdef sqlite3Isdigit
/* Use the SQLite core versions if this routine is part of the
** SQLite amalgamation */
-# define safe_isdigit(x) sqlite3Isdigit(x)
-# define safe_isalnum(x) sqlite3Isalnum(x)
+# define safe_isdigit(x) sqlite3Isdigit(x)
+# define safe_isalnum(x) sqlite3Isalnum(x)
+# define safe_isxdigit(x) sqlite3Isxdigit(x)
#else
/* Use the standard library for separate compilation */
#include <ctype.h> /* amalgamator: keep */
-# define safe_isdigit(x) isdigit((unsigned char)(x))
-# define safe_isalnum(x) isalnum((unsigned char)(x))
+# define safe_isdigit(x) isdigit((unsigned char)(x))
+# define safe_isalnum(x) isalnum((unsigned char)(x))
+# define safe_isxdigit(x) isxdigit((unsigned char)(x))
#endif
/*
@@ -176961,12 +178829,13 @@ static void jsonReturn(
c = z[++i];
if( c=='u' ){
u32 v = 0, k;
- for(k=0; k<4 && i<n-2; i++, k++){
+ for(k=0; k<4; i++, k++){
+ assert( i<n-2 );
c = z[i+1];
- if( c>='0' && c<='9' ) v = v*16 + c - '0';
- else if( c>='A' && c<='F' ) v = v*16 + c - 'A' + 10;
- else if( c>='a' && c<='f' ) v = v*16 + c - 'a' + 10;
- else break;
+ assert( safe_isxdigit(c) );
+ if( c<='9' ) v = v*16 + c - '0';
+ else if( c<='F' ) v = v*16 + c - 'A' + 10;
+ else v = v*16 + c - 'a' + 10;
}
if( v==0 ) break;
if( v<=0x7f ){
@@ -177071,6 +178940,15 @@ static int jsonParseAddNode(
}
/*
+** Return true if z[] begins with 4 (or more) hexadecimal digits
+*/
+static int jsonIs4Hex(const char *z){
+ int i;
+ for(i=0; i<4; i++) if( !safe_isxdigit(z[i]) ) return 0;
+ return 1;
+}
+
+/*
** Parse a single JSON value which begins at pParse->zJson[i]. Return the
** index of the first character past the end of the value parsed.
**
@@ -177144,8 +179022,13 @@ static int jsonParseValue(JsonParse *pParse, u32 i){
if( c==0 ) return -1;
if( c=='\\' ){
c = pParse->zJson[++j];
- if( c==0 ) return -1;
- jnFlags = JNODE_ESCAPE;
+ if( c=='"' || c=='\\' || c=='/' || c=='b' || c=='f'
+ || c=='n' || c=='r' || c=='t'
+ || (c=='u' && jsonIs4Hex(pParse->zJson+j+1)) ){
+ jnFlags = JNODE_ESCAPE;
+ }else{
+ return -1;
+ }
}else if( c=='"' ){
break;
}
@@ -178013,7 +179896,7 @@ static void jsonObjectFinal(sqlite3_context *ctx){
if( pStr ){
jsonAppendChar(pStr, '}');
if( pStr->bErr ){
- if( pStr->bErr==0 ) sqlite3_result_error_nomem(ctx);
+ if( pStr->bErr==1 ) sqlite3_result_error_nomem(ctx);
assert( pStr->bStatic );
}else{
sqlite3_result_text(ctx, pStr->zBuf, pStr->nUsed,
@@ -178291,9 +180174,9 @@ static int jsonEachColumn(
/* For json_each() path and root are the same so fall through
** into the root case */
}
- case JEACH_ROOT: {
+ default: {
const char *zRoot = p->zRoot;
- if( zRoot==0 ) zRoot = "$";
+ if( zRoot==0 ) zRoot = "$";
sqlite3_result_text(ctx, zRoot, -1, SQLITE_STATIC);
break;
}
@@ -180424,6 +182307,31 @@ static int fts5yyGrowStack(fts5yyParser *p){
# define fts5YYMALLOCARGTYPE size_t
#endif
+/* Initialize a new parser that has already been allocated.
+*/
+static void sqlite3Fts5ParserInit(void *fts5yypParser){
+ fts5yyParser *pParser = (fts5yyParser*)fts5yypParser;
+#ifdef fts5YYTRACKMAXSTACKDEPTH
+ pParser->fts5yyhwm = 0;
+#endif
+#if fts5YYSTACKDEPTH<=0
+ pParser->fts5yytos = NULL;
+ pParser->fts5yystack = NULL;
+ pParser->fts5yystksz = 0;
+ if( fts5yyGrowStack(pParser) ){
+ pParser->fts5yystack = &pParser->fts5yystk0;
+ pParser->fts5yystksz = 1;
+ }
+#endif
+#ifndef fts5YYNOERRORRECOVERY
+ pParser->fts5yyerrcnt = -1;
+#endif
+ pParser->fts5yytos = pParser->fts5yystack;
+ pParser->fts5yystack[0].stateno = 0;
+ pParser->fts5yystack[0].major = 0;
+}
+
+#ifndef sqlite3Fts5Parser_ENGINEALWAYSONSTACK
/*
** This function allocates a new parser.
** The only argument is a pointer to a function which works like
@@ -180439,28 +182347,11 @@ static int fts5yyGrowStack(fts5yyParser *p){
static void *sqlite3Fts5ParserAlloc(void *(*mallocProc)(fts5YYMALLOCARGTYPE)){
fts5yyParser *pParser;
pParser = (fts5yyParser*)(*mallocProc)( (fts5YYMALLOCARGTYPE)sizeof(fts5yyParser) );
- if( pParser ){
-#ifdef fts5YYTRACKMAXSTACKDEPTH
- pParser->fts5yyhwm = 0;
-#endif
-#if fts5YYSTACKDEPTH<=0
- pParser->fts5yytos = NULL;
- pParser->fts5yystack = NULL;
- pParser->fts5yystksz = 0;
- if( fts5yyGrowStack(pParser) ){
- pParser->fts5yystack = &pParser->fts5yystk0;
- pParser->fts5yystksz = 1;
- }
-#endif
-#ifndef fts5YYNOERRORRECOVERY
- pParser->fts5yyerrcnt = -1;
-#endif
- pParser->fts5yytos = pParser->fts5yystack;
- pParser->fts5yystack[0].stateno = 0;
- pParser->fts5yystack[0].major = 0;
- }
+ if( pParser ) sqlite3Fts5ParserInit(pParser);
return pParser;
}
+#endif /* sqlite3Fts5Parser_ENGINEALWAYSONSTACK */
+
/* The following function deletes the "minor type" or semantic value
** associated with a symbol. The symbol can be either a terminal
@@ -180542,6 +182433,18 @@ static void fts5yy_pop_parser_stack(fts5yyParser *pParser){
fts5yy_destructor(pParser, fts5yytos->major, &fts5yytos->minor);
}
+/*
+** Clear all secondary memory allocations from the parser
+*/
+static void sqlite3Fts5ParserFinalize(void *p){
+ fts5yyParser *pParser = (fts5yyParser*)p;
+ while( pParser->fts5yytos>pParser->fts5yystack ) fts5yy_pop_parser_stack(pParser);
+#if fts5YYSTACKDEPTH<=0
+ if( pParser->fts5yystack!=&pParser->fts5yystk0 ) free(pParser->fts5yystack);
+#endif
+}
+
+#ifndef sqlite3Fts5Parser_ENGINEALWAYSONSTACK
/*
** Deallocate and destroy a parser. Destructors are called for
** all stack elements before shutting the parser down.
@@ -180554,16 +182457,13 @@ static void sqlite3Fts5ParserFree(
void *p, /* The parser to be deleted */
void (*freeProc)(void*) /* Function used to reclaim memory */
){
- fts5yyParser *pParser = (fts5yyParser*)p;
#ifndef fts5YYPARSEFREENEVERNULL
- if( pParser==0 ) return;
-#endif
- while( pParser->fts5yytos>pParser->fts5yystack ) fts5yy_pop_parser_stack(pParser);
-#if fts5YYSTACKDEPTH<=0
- if( pParser->fts5yystack!=&pParser->fts5yystk0 ) free(pParser->fts5yystack);
+ if( p==0 ) return;
#endif
- (*freeProc)((void*)pParser);
+ sqlite3Fts5ParserFinalize(p);
+ (*freeProc)(p);
}
+#endif /* sqlite3Fts5Parser_ENGINEALWAYSONSTACK */
/*
** Return the peak depth of the stack for a parser.
@@ -180674,7 +182574,6 @@ static int fts5yy_find_reduce_action(
*/
static void fts5yyStackOverflow(fts5yyParser *fts5yypParser){
sqlite3Fts5ParserARG_FETCH;
- fts5yypParser->fts5yytos--;
#ifndef NDEBUG
if( fts5yyTraceFILE ){
fprintf(fts5yyTraceFILE,"%sStack Overflow!\n",fts5yyTracePrompt);
@@ -180729,12 +182628,14 @@ static void fts5yy_shift(
#endif
#if fts5YYSTACKDEPTH>0
if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5YYSTACKDEPTH] ){
+ fts5yypParser->fts5yytos--;
fts5yyStackOverflow(fts5yypParser);
return;
}
#else
if( fts5yypParser->fts5yytos>=&fts5yypParser->fts5yystack[fts5yypParser->fts5yystksz] ){
if( fts5yyGrowStack(fts5yypParser) ){
+ fts5yypParser->fts5yytos--;
fts5yyStackOverflow(fts5yypParser);
return;
}
@@ -184046,48 +185947,61 @@ static int fts5ExprNearTest(
** Initialize all term iterators in the pNear object. If any term is found
** to match no documents at all, return immediately without initializing any
** further iterators.
+**
+** If an error occurs, return an SQLite error code. Otherwise, return
+** SQLITE_OK. It is not considered an error if some term matches zero
+** documents.
*/
static int fts5ExprNearInitAll(
Fts5Expr *pExpr,
Fts5ExprNode *pNode
){
Fts5ExprNearset *pNear = pNode->pNear;
- int i, j;
- int rc = SQLITE_OK;
- int bEof = 1;
+ int i;
assert( pNode->bNomatch==0 );
- for(i=0; rc==SQLITE_OK && i<pNear->nPhrase; i++){
+ for(i=0; i<pNear->nPhrase; i++){
Fts5ExprPhrase *pPhrase = pNear->apPhrase[i];
- for(j=0; j<pPhrase->nTerm; j++){
- Fts5ExprTerm *pTerm = &pPhrase->aTerm[j];
- Fts5ExprTerm *p;
+ if( pPhrase->nTerm==0 ){
+ pNode->bEof = 1;
+ return SQLITE_OK;
+ }else{
+ int j;
+ for(j=0; j<pPhrase->nTerm; j++){
+ Fts5ExprTerm *pTerm = &pPhrase->aTerm[j];
+ Fts5ExprTerm *p;
+ int bHit = 0;
+
+ for(p=pTerm; p; p=p->pSynonym){
+ int rc;
+ if( p->pIter ){
+ sqlite3Fts5IterClose(p->pIter);
+ p->pIter = 0;
+ }
+ rc = sqlite3Fts5IndexQuery(
+ pExpr->pIndex, p->zTerm, (int)strlen(p->zTerm),
+ (pTerm->bPrefix ? FTS5INDEX_QUERY_PREFIX : 0) |
+ (pExpr->bDesc ? FTS5INDEX_QUERY_DESC : 0),
+ pNear->pColset,
+ &p->pIter
+ );
+ assert( (rc==SQLITE_OK)==(p->pIter!=0) );
+ if( rc!=SQLITE_OK ) return rc;
+ if( 0==sqlite3Fts5IterEof(p->pIter) ){
+ bHit = 1;
+ }
+ }
- for(p=pTerm; p && rc==SQLITE_OK; p=p->pSynonym){
- if( p->pIter ){
- sqlite3Fts5IterClose(p->pIter);
- p->pIter = 0;
- }
- rc = sqlite3Fts5IndexQuery(
- pExpr->pIndex, p->zTerm, (int)strlen(p->zTerm),
- (pTerm->bPrefix ? FTS5INDEX_QUERY_PREFIX : 0) |
- (pExpr->bDesc ? FTS5INDEX_QUERY_DESC : 0),
- pNear->pColset,
- &p->pIter
- );
- assert( rc==SQLITE_OK || p->pIter==0 );
- if( p->pIter && 0==sqlite3Fts5IterEof(p->pIter) ){
- bEof = 0;
+ if( bHit==0 ){
+ pNode->bEof = 1;
+ return SQLITE_OK;
}
}
-
- if( bEof ) break;
}
- if( bEof ) break;
}
- pNode->bEof = bEof;
- return rc;
+ pNode->bEof = 0;
+ return SQLITE_OK;
}
/*
@@ -184220,7 +186134,7 @@ static int fts5ExprNodeTest_STRING(
}
}else{
Fts5IndexIter *pIter = pPhrase->aTerm[j].pIter;
- if( pIter->iRowid==iLast ) continue;
+ if( pIter->iRowid==iLast || pIter->bEof ) continue;
bMatch = 0;
if( fts5ExprAdvanceto(pIter, bDesc, &iLast, &rc, &pNode->bEof) ){
return rc;
@@ -184631,7 +186545,10 @@ static int sqlite3Fts5ExprFirst(Fts5Expr *p, Fts5Index *pIdx, i64 iFirst, int bD
/* If not at EOF but the current rowid occurs earlier than iFirst in
** the iteration order, move to document iFirst or later. */
- if( pRoot->bEof==0 && fts5RowidCmp(p, pRoot->iRowid, iFirst)<0 ){
+ if( rc==SQLITE_OK
+ && 0==pRoot->bEof
+ && fts5RowidCmp(p, pRoot->iRowid, iFirst)<0
+ ){
rc = fts5ExprNodeNext(p, pRoot, 1, iFirst);
}
@@ -184885,7 +186802,7 @@ static Fts5ExprPhrase *sqlite3Fts5ParseTerm(
rc = fts5ParseStringFromToken(pToken, &z);
if( rc==SQLITE_OK ){
- int flags = FTS5_TOKENIZE_QUERY | (bPrefix ? FTS5_TOKENIZE_QUERY : 0);
+ int flags = FTS5_TOKENIZE_QUERY | (bPrefix ? FTS5_TOKENIZE_PREFIX : 0);
int n;
sqlite3Fts5Dequote(z);
n = (int)strlen(z);
@@ -188559,7 +190476,7 @@ static void fts5SegIterNext(
else if( pLeaf->nn>pLeaf->szLeaf ){
pIter->iPgidxOff = pLeaf->szLeaf + fts5GetVarint32(
&pLeaf->p[pLeaf->szLeaf], iOff
- );
+ );
pIter->iLeafOffset = iOff;
pIter->iEndofDoclist = iOff;
bNewTerm = 1;
@@ -188593,6 +190510,7 @@ static void fts5SegIterNext(
*/
int nSz;
assert( p->rc==SQLITE_OK );
+ assert( pIter->iLeafOffset<=pIter->pLeaf->nn );
fts5FastGetVarint32(pIter->pLeaf->p, pIter->iLeafOffset, nSz);
pIter->bDel = (nSz & 0x0001);
pIter->nPos = nSz>>1;
@@ -189360,6 +191278,7 @@ static void fts5MultiIterNext(
i64 iFrom /* Advance at least as far as this */
){
int bUseFrom = bFrom;
+ assert( pIter->base.bEof==0 );
while( p->rc==SQLITE_OK ){
int iFirst = pIter->aFirst[1].iFirst;
int bNewTerm = 0;
@@ -189586,7 +191505,7 @@ static void fts5ChunkIterate(
break;
}else{
pgno++;
- pData = fts5DataRead(p, FTS5_SEGMENT_ROWID(pSeg->pSeg->iSegid, pgno));
+ pData = fts5LeafRead(p, FTS5_SEGMENT_ROWID(pSeg->pSeg->iSegid, pgno));
if( pData==0 ) break;
pChunk = &pData->p[4];
nChunk = MIN(nRem, pData->szLeaf - 4);
@@ -192348,7 +194267,7 @@ static void fts5IndexIntegrityCheckSegment(
** ignore this b-tree entry. Otherwise, load it into memory. */
if( iIdxLeaf<pSeg->pgnoFirst ) continue;
iRow = FTS5_SEGMENT_ROWID(pSeg->iSegid, iIdxLeaf);
- pLeaf = fts5DataRead(p, iRow);
+ pLeaf = fts5LeafRead(p, iRow);
if( pLeaf==0 ) break;
/* Check that the leaf contains at least one term, and that it is equal
@@ -195624,7 +197543,7 @@ static void fts5SourceIdFunc(
){
assert( nArg==0 );
UNUSED_PARAM2(nArg, apUnused);
- sqlite3_result_text(pCtx, "fts5: 2016-11-04 12:08:49 1136863c76576110e710dd5d69ab6bf347c65e36", -1, SQLITE_TRANSIENT);
+ sqlite3_result_text(pCtx, "fts5: 2017-02-13 16:02:40 ada05cfa86ad7f5645450ac7a2a21c9aa6e57d2c", -1, SQLITE_TRANSIENT);
}
static int fts5Init(sqlite3 *db){
@@ -199486,4 +201405,4 @@ static int sqlite3Fts5VocabInit(Fts5Global *pGlobal, sqlite3 *db){
#else // USE_LIBSQLITE3
// If users really want to link against the system sqlite3 we
// need to make this file a noop.
- #endif
\ No newline at end of file
+ #endif
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
index d900cdd..460cf55 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3-binding.h
@@ -122,13 +122,13 @@ extern "C" {
** [sqlite3_libversion_number()], [sqlite3_sourceid()],
** [sqlite_version()] and [sqlite_source_id()].
*/
-#define SQLITE_VERSION "3.15.1"
-#define SQLITE_VERSION_NUMBER 3015001
-#define SQLITE_SOURCE_ID "2016-11-04 12:08:49 1136863c76576110e710dd5d69ab6bf347c65e36"
+#define SQLITE_VERSION "3.17.0"
+#define SQLITE_VERSION_NUMBER 3017000
+#define SQLITE_SOURCE_ID "2017-02-13 16:02:40 ada05cfa86ad7f5645450ac7a2a21c9aa6e57d2c"
/*
** CAPI3REF: Run-Time Library Version Numbers
-** KEYWORDS: sqlite3_version, sqlite3_sourceid
+** KEYWORDS: sqlite3_version sqlite3_sourceid
**
** These interfaces provide the same information as the [SQLITE_VERSION],
** [SQLITE_VERSION_NUMBER], and [SQLITE_SOURCE_ID] C preprocessor macros
@@ -260,7 +260,11 @@ typedef struct sqlite3 sqlite3;
*/
#ifdef SQLITE_INT64_TYPE
typedef SQLITE_INT64_TYPE sqlite_int64;
- typedef unsigned SQLITE_INT64_TYPE sqlite_uint64;
+# ifdef SQLITE_UINT64_TYPE
+ typedef SQLITE_UINT64_TYPE sqlite_uint64;
+# else
+ typedef unsigned SQLITE_INT64_TYPE sqlite_uint64;
+# endif
#elif defined(_MSC_VER) || defined(__BORLANDC__)
typedef __int64 sqlite_int64;
typedef unsigned __int64 sqlite_uint64;
@@ -573,7 +577,7 @@ SQLITE_API int sqlite3_exec(
** file that were written at the application level might have changed
** and that adjacent bytes, even bytes within the same sector are
** guaranteed to be unchanged. The SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN
-** flag indicate that a file cannot be deleted when open. The
+** flag indicates that a file cannot be deleted when open. The
** SQLITE_IOCAP_IMMUTABLE flag indicates that the file is on
** read-only media and cannot be changed even by processes with
** elevated privileges.
@@ -723,6 +727,9 @@ struct sqlite3_file {
** <li> [SQLITE_IOCAP_ATOMIC64K]
** <li> [SQLITE_IOCAP_SAFE_APPEND]
** <li> [SQLITE_IOCAP_SEQUENTIAL]
+** <li> [SQLITE_IOCAP_UNDELETABLE_WHEN_OPEN]
+** <li> [SQLITE_IOCAP_POWERSAFE_OVERWRITE]
+** <li> [SQLITE_IOCAP_IMMUTABLE]
** </ul>
**
** The SQLITE_IOCAP_ATOMIC property means that all writes of
@@ -1036,6 +1043,7 @@ struct sqlite3_io_methods {
#define SQLITE_FCNTL_VFS_POINTER 27
#define SQLITE_FCNTL_JOURNAL_POINTER 28
#define SQLITE_FCNTL_WIN32_GET_HANDLE 29
+#define SQLITE_FCNTL_PDB 30
/* deprecated names */
#define SQLITE_GET_LOCKPROXYFILE SQLITE_FCNTL_GET_LOCKPROXYFILE
@@ -1988,6 +1996,18 @@ struct sqlite3_mem_methods {
** until after the database connection closes.
** </dd>
**
+** <dt>SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE</dt>
+** <dd> Usually, when a database in wal mode is closed or detached from a
+** database handle, SQLite checks if this will mean that there are now no
+** connections at all to the database. If so, it performs a checkpoint
+** operation before closing the connection. This option may be used to
+** override this behaviour. The first parameter passed to this operation
+** is an integer - non-zero to disable checkpoints-on-close, or zero (the
+** default) to enable them. The second parameter is a pointer to an integer
+** into which is written 0 or 1 to indicate whether checkpoints-on-close
+** have been disabled - 0 if they are not disabled, 1 if they are.
+** </dd>
+**
** </dl>
*/
#define SQLITE_DBCONFIG_MAINDBNAME 1000 /* const char* */
@@ -1996,6 +2016,7 @@ struct sqlite3_mem_methods {
#define SQLITE_DBCONFIG_ENABLE_TRIGGER 1003 /* int int* */
#define SQLITE_DBCONFIG_ENABLE_FTS3_TOKENIZER 1004 /* int int* */
#define SQLITE_DBCONFIG_ENABLE_LOAD_EXTENSION 1005 /* int int* */
+#define SQLITE_DBCONFIG_NO_CKPT_ON_CLOSE 1006 /* int int* */
/*
@@ -3597,6 +3618,10 @@ SQLITE_API char *sqlite3_expanded_sql(sqlite3_stmt *pStmt);
** sqlite3_stmt_readonly() to return true since, while those statements
** change the configuration of a database connection, they do not make
** changes to the content of the database files on disk.
+** ^The sqlite3_stmt_readonly() interface returns true for [BEGIN] since
+** [BEGIN] merely sets internal flags, but the [BEGIN|BEGIN IMMEDIATE] and
+** [BEGIN|BEGIN EXCLUSIVE] commands do touch the database and so
+** sqlite3_stmt_readonly() returns false for those commands.
*/
SQLITE_API int sqlite3_stmt_readonly(sqlite3_stmt *pStmt);
@@ -3879,8 +3904,12 @@ SQLITE_API int sqlite3_clear_bindings(sqlite3_stmt*);
** METHOD: sqlite3_stmt
**
** ^Return the number of columns in the result set returned by the
-** [prepared statement]. ^This routine returns 0 if pStmt is an SQL
-** statement that does not return data (for example an [UPDATE]).
+** [prepared statement]. ^If this routine returns 0, that means the
+** [prepared statement] returns no data (for example an [UPDATE]).
+** ^However, just because this routine returns a positive number does not
+** mean that one or more rows of data will be returned. ^A SELECT statement
+** will always have a positive sqlite3_column_count() but depending on the
+** WHERE clause constraints and the table content, it might return no rows.
**
** See also: [sqlite3_data_count()]
*/
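
A loose illustration of the clarified wording above, seen from Go through database/sql rather than the C API: a SELECT can report a positive column count even when it produces no rows. The table and column names are hypothetical, and the snippet assumes the usual imports (database/sql, fmt) plus a blank import of github.com/mattn/go-sqlite3:

    // columnCountDemo shows that column metadata is available even when the
    // WHERE clause filters out every row.
    func columnCountDemo(db *sql.DB) error {
        rows, err := db.Query("SELECT id, name FROM users WHERE 1=0")
        if err != nil {
            return err
        }
        defer rows.Close()

        cols, err := rows.Columns()
        if err != nil {
            return err
        }
        fmt.Println(cols)        // [id name]
        fmt.Println(rows.Next()) // false: two result columns, zero rows
        return rows.Err()
    }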
@@ -5389,7 +5418,7 @@ SQLITE_API void *sqlite3_rollback_hook(sqlite3*, void(*)(void *), void*);
** ^The update hook is not invoked when [WITHOUT ROWID] tables are modified.
**
** ^In the current implementation, the update hook
-** is not invoked when duplication rows are deleted because of an
+** is not invoked when conflicting rows are deleted because of an
** [ON CONFLICT | ON CONFLICT REPLACE] clause. ^Nor is the update hook
** invoked when rows are deleted using the [truncate optimization].
** The exceptions defined in this paragraph might change in a future
@@ -6171,6 +6200,12 @@ typedef struct sqlite3_blob sqlite3_blob;
** [database connection] error code and message accessible via
** [sqlite3_errcode()] and [sqlite3_errmsg()] and related functions.
**
+** A BLOB referenced by sqlite3_blob_open() may be read using the
+** [sqlite3_blob_read()] interface and modified by using
+** [sqlite3_blob_write()]. The [BLOB handle] can be moved to a
+** different row of the same table using the [sqlite3_blob_reopen()]
+** interface. However, the column, table, or database of a [BLOB handle]
+** cannot be changed after the [BLOB handle] is opened.
**
** ^(If the row that a BLOB handle points to is modified by an
** [UPDATE], [DELETE], or by [ON CONFLICT] side-effects
@@ -6194,6 +6229,10 @@ typedef struct sqlite3_blob sqlite3_blob;
**
** To avoid a resource leak, every open [BLOB handle] should eventually
** be released by a call to [sqlite3_blob_close()].
+**
+** See also: [sqlite3_blob_close()],
+** [sqlite3_blob_reopen()], [sqlite3_blob_read()],
+** [sqlite3_blob_bytes()], [sqlite3_blob_write()].
*/
SQLITE_API int sqlite3_blob_open(
sqlite3*,
@@ -6209,11 +6248,11 @@ SQLITE_API int sqlite3_blob_open(
** CAPI3REF: Move a BLOB Handle to a New Row
** METHOD: sqlite3_blob
**
-** ^This function is used to move an existing blob handle so that it points
+** ^This function is used to move an existing [BLOB handle] so that it points
** to a different row of the same database table. ^The new row is identified
** by the rowid value passed as the second argument. Only the row can be
** changed. ^The database, table and column on which the blob handle is open
-** remain the same. Moving an existing blob handle to a new row can be
+** remain the same. Moving an existing [BLOB handle] to a new row is
** faster than closing the existing handle and opening a new one.
**
** ^(The new row must meet the same criteria as for [sqlite3_blob_open()] -
@@ -8142,7 +8181,7 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
**
** ^The [sqlite3_preupdate_hook()] interface registers a callback function
** that is invoked prior to each [INSERT], [UPDATE], and [DELETE] operation
-** on a [rowid table].
+** on a database table.
** ^At most one preupdate hook may be registered at a time on a single
** [database connection]; each call to [sqlite3_preupdate_hook()] overrides
** the previous setting.
@@ -8151,9 +8190,9 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
** ^The third parameter to [sqlite3_preupdate_hook()] is passed through as
** the first parameter to callbacks.
**
-** ^The preupdate hook only fires for changes to [rowid tables]; the preupdate
-** hook is not invoked for changes to [virtual tables] or [WITHOUT ROWID]
-** tables.
+** ^The preupdate hook only fires for changes to real database tables; the
+** preupdate hook is not invoked for changes to [virtual tables] or to
+** system tables like sqlite_master or sqlite_stat1.
**
** ^The second parameter to the preupdate callback is a pointer to
** the [database connection] that registered the preupdate hook.
@@ -8167,12 +8206,16 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
** databases.)^
** ^The fifth parameter to the preupdate callback is the name of the
** table that is being modified.
-** ^The sixth parameter to the preupdate callback is the initial [rowid] of the
-** row being changes for SQLITE_UPDATE and SQLITE_DELETE changes and is
-** undefined for SQLITE_INSERT changes.
-** ^The seventh parameter to the preupdate callback is the final [rowid] of
-** the row being changed for SQLITE_UPDATE and SQLITE_INSERT changes and is
-** undefined for SQLITE_DELETE changes.
+**
+** For an UPDATE or DELETE operation on a [rowid table], the sixth
+** parameter passed to the preupdate callback is the initial [rowid] of the
+** row being modified or deleted. For an INSERT operation on a rowid table,
+** or any operation on a WITHOUT ROWID table, the value of the sixth
+** parameter is undefined. For an INSERT or UPDATE on a rowid table the
+** seventh parameter is the final rowid value of the row being inserted
+** or updated. The value of the seventh parameter passed to the callback
+** function is not defined for operations on WITHOUT ROWID tables, or for
+** INSERT operations on rowid tables.
**
** The [sqlite3_preupdate_old()], [sqlite3_preupdate_new()],
** [sqlite3_preupdate_count()], and [sqlite3_preupdate_depth()] interfaces
@@ -8212,7 +8255,8 @@ SQLITE_API int sqlite3_db_cacheflush(sqlite3*);
**
** See also: [sqlite3_update_hook()]
*/
-SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_preupdate_hook(
+#if defined(SQLITE_ENABLE_PREUPDATE_HOOK)
+SQLITE_API void *sqlite3_preupdate_hook(
sqlite3 *db,
void(*xPreUpdate)(
void *pCtx, /* Copy of third arg to preupdate_hook() */
@@ -8225,10 +8269,11 @@ SQLITE_API SQLITE_EXPERIMENTAL void *sqlite3_preupdate_hook(
),
void*
);
-SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **);
-SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_count(sqlite3 *);
-SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_depth(sqlite3 *);
-SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **);
+SQLITE_API int sqlite3_preupdate_old(sqlite3 *, int, sqlite3_value **);
+SQLITE_API int sqlite3_preupdate_count(sqlite3 *);
+SQLITE_API int sqlite3_preupdate_depth(sqlite3 *);
+SQLITE_API int sqlite3_preupdate_new(sqlite3 *, int, sqlite3_value **);
+#endif
/*
** CAPI3REF: Low-level system error code
@@ -8244,7 +8289,7 @@ SQLITE_API int sqlite3_system_errno(sqlite3*);
/*
** CAPI3REF: Database Snapshot
-** KEYWORDS: {snapshot}
+** KEYWORDS: {snapshot} {sqlite3_snapshot}
** EXPERIMENTAL
**
** An instance of the snapshot object records the state of a [WAL mode]
@@ -8268,7 +8313,9 @@ SQLITE_API int sqlite3_system_errno(sqlite3*);
** to an historical snapshot (if possible). The destructor for
** sqlite3_snapshot objects is [sqlite3_snapshot_free()].
*/
-typedef struct sqlite3_snapshot sqlite3_snapshot;
+typedef struct sqlite3_snapshot {
+ unsigned char hidden[48];
+} sqlite3_snapshot;
/*
** CAPI3REF: Record A Database Snapshot
@@ -8279,9 +8326,32 @@ typedef struct sqlite3_snapshot sqlite3_snapshot;
** schema S in database connection D. ^On success, the
** [sqlite3_snapshot_get(D,S,P)] interface writes a pointer to the newly
** created [sqlite3_snapshot] object into *P and returns SQLITE_OK.
-** ^If schema S of [database connection] D is not a [WAL mode] database
-** that is in a read transaction, then [sqlite3_snapshot_get(D,S,P)]
-** leaves the *P value unchanged and returns an appropriate [error code].
+** If there is not already a read-transaction open on schema S when
+** this function is called, one is opened automatically.
+**
+** The following must be true for this function to succeed. If any of
+** the following statements are false when sqlite3_snapshot_get() is
+** called, SQLITE_ERROR is returned. The final value of *P is undefined
+** in this case.
+**
+** <ul>
+** <li> The database handle must be in [autocommit mode].
+**
+** <li> Schema S of [database connection] D must be a [WAL mode] database.
+**
+** <li> There must not be a write transaction open on schema S of database
+** connection D.
+**
+** <li> One or more transactions must have been written to the current wal
+** file since it was created on disk (by any connection). This means
+** that a snapshot cannot be taken on a wal mode database with no wal
+** file immediately after it is first opened. At least one transaction
+** must be written to it first.
+** </ul>
+**
+** This function may also return SQLITE_NOMEM. If it is called with the
+** database handle in autocommit mode but fails for some other reason,
+** whether or not a read transaction is opened on schema S is undefined.
**
** The [sqlite3_snapshot] object returned from a successful call to
** [sqlite3_snapshot_get()] must be freed using [sqlite3_snapshot_free()]
@@ -8375,6 +8445,28 @@ SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_cmp(
);
/*
+** CAPI3REF: Recover snapshots from a wal file
+** EXPERIMENTAL
+**
+** If all connections disconnect from a database file but do not perform
+** a checkpoint, the existing wal file is opened along with the database
+** file the next time the database is opened. At this point it is only
+** possible to successfully call sqlite3_snapshot_open() to open the most
+** recent snapshot of the database (the one at the head of the wal file),
+** even though the wal file may contain other valid snapshots for which
+** clients have sqlite3_snapshot handles.
+**
+** This function attempts to scan the wal file associated with database zDb
+** of database handle db and make all valid snapshots available to
+** sqlite3_snapshot_open(). It is an error if there is already a read
+** transaction open on the database, or if the database is not a wal mode
+** database.
+**
+** SQLITE_OK is returned if successful, or an SQLite error code otherwise.
+*/
+SQLITE_API SQLITE_EXPERIMENTAL int sqlite3_snapshot_recover(sqlite3 *db, const char *zDb);
+
+/*
** Undo the hack that converts floating point types to integer for
** builds on processors without floating point support.
*/
@@ -8559,7 +8651,7 @@ typedef struct sqlite3_changeset_iter sqlite3_changeset_iter;
** attached database. It is not an error if database zDb is not attached
** to the database when the session object is created.
*/
-int sqlite3session_create(
+SQLITE_API int sqlite3session_create(
sqlite3 *db, /* Database handle */
const char *zDb, /* Name of db (e.g. "main") */
sqlite3_session **ppSession /* OUT: New session object */
@@ -8577,7 +8669,7 @@ int sqlite3session_create(
** are attached is closed. Refer to the documentation for
** [sqlite3session_create()] for details.
*/
-void sqlite3session_delete(sqlite3_session *pSession);
+SQLITE_API void sqlite3session_delete(sqlite3_session *pSession);
/*
@@ -8597,7 +8689,7 @@ void sqlite3session_delete(sqlite3_session *pSession);
** The return value indicates the final state of the session object: 0 if
** the session is disabled, or 1 if it is enabled.
*/
-int sqlite3session_enable(sqlite3_session *pSession, int bEnable);
+SQLITE_API int sqlite3session_enable(sqlite3_session *pSession, int bEnable);
/*
** CAPI3REF: Set Or Clear the Indirect Change Flag
@@ -8626,7 +8718,7 @@ int sqlite3session_enable(sqlite3_session *pSession, int bEnable);
** The return value indicates the final state of the indirect flag: 0 if
** it is clear, or 1 if it is set.
*/
-int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect);
+SQLITE_API int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect);
/*
** CAPI3REF: Attach A Table To A Session Object
@@ -8656,7 +8748,7 @@ int sqlite3session_indirect(sqlite3_session *pSession, int bIndirect);
** SQLITE_OK is returned if the call completes without error. Or, if an error
** occurs, an SQLite error code (e.g. SQLITE_NOMEM) is returned.
*/
-int sqlite3session_attach(
+SQLITE_API int sqlite3session_attach(
sqlite3_session *pSession, /* Session object */
const char *zTab /* Table name */
);
@@ -8670,7 +8762,7 @@ int sqlite3session_attach(
** If xFilter returns 0, changes are not tracked. Note that once a table is
** attached, xFilter will not be called again.
*/
-void sqlite3session_table_filter(
+SQLITE_API void sqlite3session_table_filter(
sqlite3_session *pSession, /* Session object */
int(*xFilter)(
void *pCtx, /* Copy of third arg to _filter_table() */
@@ -8783,7 +8875,7 @@ void sqlite3session_table_filter(
** another field of the same row is updated while the session is enabled, the
** resulting changeset will contain an UPDATE change that updates both fields.
*/
-int sqlite3session_changeset(
+SQLITE_API int sqlite3session_changeset(
sqlite3_session *pSession, /* Session object */
int *pnChangeset, /* OUT: Size of buffer at *ppChangeset */
void **ppChangeset /* OUT: Buffer containing changeset */
@@ -8827,7 +8919,8 @@ int sqlite3session_changeset(
** the from-table, a DELETE record is added to the session object.
**
** <li> For each row (primary key) that exists in both tables, but features
-** different in each, an UPDATE record is added to the session.
+** different non-PK values in each, an UPDATE record is added to the
+** session.
** </ul>
**
** To clarify, if this function is called and then a changeset constructed
@@ -8844,7 +8937,7 @@ int sqlite3session_changeset(
** message. It is the responsibility of the caller to free this buffer using
** sqlite3_free().
*/
-int sqlite3session_diff(
+SQLITE_API int sqlite3session_diff(
sqlite3_session *pSession,
const char *zFromDb,
const char *zTbl,
@@ -8880,7 +8973,7 @@ int sqlite3session_diff(
** a single table are grouped together, tables appear in the order in which
** they were attached to the session object).
*/
-int sqlite3session_patchset(
+SQLITE_API int sqlite3session_patchset(
sqlite3_session *pSession, /* Session object */
int *pnPatchset, /* OUT: Size of buffer at *ppChangeset */
void **ppPatchset /* OUT: Buffer containing changeset */
@@ -8901,7 +8994,7 @@ int sqlite3session_patchset(
** guaranteed that a call to sqlite3session_changeset() will return a
** changeset containing zero changes.
*/
-int sqlite3session_isempty(sqlite3_session *pSession);
+SQLITE_API int sqlite3session_isempty(sqlite3_session *pSession);
/*
** CAPI3REF: Create An Iterator To Traverse A Changeset
@@ -8936,7 +9029,7 @@ int sqlite3session_isempty(sqlite3_session *pSession);
** that applies to table X, then one for table Y, and then later on visit
** another change for table X.
*/
-int sqlite3changeset_start(
+SQLITE_API int sqlite3changeset_start(
sqlite3_changeset_iter **pp, /* OUT: New changeset iterator handle */
int nChangeset, /* Size of changeset blob in bytes */
void *pChangeset /* Pointer to blob containing changeset */
@@ -8965,7 +9058,7 @@ int sqlite3changeset_start(
** codes include SQLITE_CORRUPT (if the changeset buffer is corrupt) or
** SQLITE_NOMEM.
*/
-int sqlite3changeset_next(sqlite3_changeset_iter *pIter);
+SQLITE_API int sqlite3changeset_next(sqlite3_changeset_iter *pIter);
/*
** CAPI3REF: Obtain The Current Operation From A Changeset Iterator
@@ -8993,7 +9086,7 @@ int sqlite3changeset_next(sqlite3_changeset_iter *pIter);
** SQLite error code is returned. The values of the output variables may not
** be trusted in this case.
*/
-int sqlite3changeset_op(
+SQLITE_API int sqlite3changeset_op(
sqlite3_changeset_iter *pIter, /* Iterator object */
const char **pzTab, /* OUT: Pointer to table name */
int *pnCol, /* OUT: Number of columns in table */
@@ -9026,7 +9119,7 @@ int sqlite3changeset_op(
** SQLITE_OK is returned and the output variables populated as described
** above.
*/
-int sqlite3changeset_pk(
+SQLITE_API int sqlite3changeset_pk(
sqlite3_changeset_iter *pIter, /* Iterator object */
unsigned char **pabPK, /* OUT: Array of boolean - true for PK cols */
int *pnCol /* OUT: Number of entries in output array */
@@ -9056,7 +9149,7 @@ int sqlite3changeset_pk(
** If some other error occurs (e.g. an OOM condition), an SQLite error code
** is returned and *ppValue is set to NULL.
*/
-int sqlite3changeset_old(
+SQLITE_API int sqlite3changeset_old(
sqlite3_changeset_iter *pIter, /* Changeset iterator */
int iVal, /* Column number */
sqlite3_value **ppValue /* OUT: Old value (or NULL pointer) */
@@ -9089,7 +9182,7 @@ int sqlite3changeset_old(
** If some other error occurs (e.g. an OOM condition), an SQLite error code
** is returned and *ppValue is set to NULL.
*/
-int sqlite3changeset_new(
+SQLITE_API int sqlite3changeset_new(
sqlite3_changeset_iter *pIter, /* Changeset iterator */
int iVal, /* Column number */
sqlite3_value **ppValue /* OUT: New value (or NULL pointer) */
@@ -9116,7 +9209,7 @@ int sqlite3changeset_new(
** If some other error occurs (e.g. an OOM condition), an SQLite error code
** is returned and *ppValue is set to NULL.
*/
-int sqlite3changeset_conflict(
+SQLITE_API int sqlite3changeset_conflict(
sqlite3_changeset_iter *pIter, /* Changeset iterator */
int iVal, /* Column number */
sqlite3_value **ppValue /* OUT: Value from conflicting row */
@@ -9132,7 +9225,7 @@ int sqlite3changeset_conflict(
**
** In all other cases this function returns SQLITE_MISUSE.
*/
-int sqlite3changeset_fk_conflicts(
+SQLITE_API int sqlite3changeset_fk_conflicts(
sqlite3_changeset_iter *pIter, /* Changeset iterator */
int *pnOut /* OUT: Number of FK violations */
);
@@ -9165,7 +9258,7 @@ int sqlite3changeset_fk_conflicts(
** // An error has occurred
** }
*/
-int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter);
+SQLITE_API int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter);
/*
** CAPI3REF: Invert A Changeset
@@ -9195,7 +9288,7 @@ int sqlite3changeset_finalize(sqlite3_changeset_iter *pIter);
** WARNING/TODO: This function currently assumes that the input is a valid
** changeset. If it is not, the results are undefined.
*/
-int sqlite3changeset_invert(
+SQLITE_API int sqlite3changeset_invert(
int nIn, const void *pIn, /* Input changeset */
int *pnOut, void **ppOut /* OUT: Inverse of input */
);
@@ -9224,7 +9317,7 @@ int sqlite3changeset_invert(
**
** Refer to the sqlite3_changegroup documentation below for details.
*/
-int sqlite3changeset_concat(
+SQLITE_API int sqlite3changeset_concat(
int nA, /* Number of bytes in buffer pA */
void *pA, /* Pointer to buffer containing changeset A */
int nB, /* Number of bytes in buffer pB */
@@ -9412,7 +9505,7 @@ void sqlite3changegroup_delete(sqlite3_changegroup*);
** <ul>
** <li> The table has the same name as the name recorded in the
** changeset, and
-** <li> The table has the same number of columns as recorded in the
+** <li> The table has at least as many columns as recorded in the
** changeset, and
** <li> The table has primary key columns in the same position as
** recorded in the changeset.
@@ -9457,7 +9550,11 @@ void sqlite3changegroup_delete(sqlite3_changegroup*);
** If a row with matching primary key values is found, but one or more of
** the non-primary key fields contains a value different from the original
** row value stored in the changeset, the conflict-handler function is
-** invoked with [SQLITE_CHANGESET_DATA] as the second argument.
+** invoked with [SQLITE_CHANGESET_DATA] as the second argument. If the
+** database table has more columns than are recorded in the changeset,
+** only the values of those non-primary key fields are compared against
+** the current database contents - any trailing database table columns
+** are ignored.
**
** If no row with matching primary key values is found in the database,
** the conflict-handler function is invoked with [SQLITE_CHANGESET_NOTFOUND]
@@ -9472,7 +9569,9 @@ void sqlite3changegroup_delete(sqlite3_changegroup*);
**
** <dt>INSERT Changes<dd>
** For each INSERT change, an attempt is made to insert the new row into
-** the database.
+** the database. If the changeset row contains fewer fields than the
+** database table, the trailing fields are populated with their default
+** values.
**
** If the attempt to insert the row fails because the database already
** contains a row with the same primary key values, the conflict handler
@@ -9490,13 +9589,13 @@ void sqlite3changegroup_delete(sqlite3_changegroup*);
** For each UPDATE change, this function checks if the target database
** contains a row with the same primary key value (or values) as the
** original row values stored in the changeset. If it does, and the values
-** stored in all non-primary key columns also match the values stored in
-** the changeset the row is updated within the target database.
+** stored in all modified non-primary key columns also match the values
+** stored in the changeset the row is updated within the target database.
**
** If a row with matching primary key values is found, but one or more of
-** the non-primary key fields contains a value different from an original
-** row value stored in the changeset, the conflict-handler function is
-** invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since
+** the modified non-primary key fields contains a value different from an
+** original row value stored in the changeset, the conflict-handler function
+** is invoked with [SQLITE_CHANGESET_DATA] as the second argument. Since
** UPDATE changes only contain values for non-primary key fields that are
** to be modified, only those fields need to match the original values to
** avoid the SQLITE_CHANGESET_DATA conflict-handler callback.
@@ -9524,7 +9623,7 @@ void sqlite3changegroup_delete(sqlite3_changegroup*);
** rolled back, restoring the target database to its original state, and an
** SQLite error code returned.
*/
-int sqlite3changeset_apply(
+SQLITE_API int sqlite3changeset_apply(
sqlite3 *db, /* Apply change to "main" db of this handle */
int nChangeset, /* Size of changeset in bytes */
void *pChangeset, /* Changeset blob */
@@ -9725,7 +9824,7 @@ int sqlite3changeset_apply(
** parameter set to a value less than or equal to zero. Other than this,
** no guarantees are made as to the size of the chunks of data returned.
*/
-int sqlite3changeset_apply_strm(
+SQLITE_API int sqlite3changeset_apply_strm(
sqlite3 *db, /* Apply change to "main" db of this handle */
int (*xInput)(void *pIn, void *pData, int *pnData), /* Input function */
void *pIn, /* First arg for xInput */
@@ -9740,7 +9839,7 @@ int sqlite3changeset_apply_strm(
),
void *pCtx /* First argument passed to xConflict */
);
-int sqlite3changeset_concat_strm(
+SQLITE_API int sqlite3changeset_concat_strm(
int (*xInputA)(void *pIn, void *pData, int *pnData),
void *pInA,
int (*xInputB)(void *pIn, void *pData, int *pnData),
@@ -9748,23 +9847,23 @@ int sqlite3changeset_concat_strm(
int (*xOutput)(void *pOut, const void *pData, int nData),
void *pOut
);
-int sqlite3changeset_invert_strm(
+SQLITE_API int sqlite3changeset_invert_strm(
int (*xInput)(void *pIn, void *pData, int *pnData),
void *pIn,
int (*xOutput)(void *pOut, const void *pData, int nData),
void *pOut
);
-int sqlite3changeset_start_strm(
+SQLITE_API int sqlite3changeset_start_strm(
sqlite3_changeset_iter **pp,
int (*xInput)(void *pIn, void *pData, int *pnData),
void *pIn
);
-int sqlite3session_changeset_strm(
+SQLITE_API int sqlite3session_changeset_strm(
sqlite3_session *pSession,
int (*xOutput)(void *pOut, const void *pData, int nData),
void *pOut
);
-int sqlite3session_patchset_strm(
+SQLITE_API int sqlite3session_patchset_strm(
sqlite3_session *pSession,
int (*xOutput)(void *pOut, const void *pData, int nData),
void *pOut
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3.go b/vendor/github.com/mattn/go-sqlite3/sqlite3.go
index cbe3964..33b9b9c 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3.go
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3.go
@@ -10,6 +10,7 @@ package sqlite3
#cgo CFLAGS: -DSQLITE_ENABLE_RTREE -DSQLITE_THREADSAFE
#cgo CFLAGS: -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_FTS3_PARENTHESIS -DSQLITE_ENABLE_FTS4_UNICODE61
#cgo CFLAGS: -DSQLITE_TRACE_SIZE_LIMIT=15
+#cgo CFLAGS: -DSQLITE_DISABLE_INTRINSIC
#cgo CFLAGS: -Wno-deprecated-declarations
#ifndef USE_LIBSQLITE3
#include <sqlite3-binding.h>
@@ -382,14 +383,14 @@ func (c *SQLiteConn) RegisterFunc(name string, impl interface{}, pure bool) erro
if pure {
opts |= C.SQLITE_DETERMINISTIC
}
- rv := sqlite3_create_function(c.db, cname, C.int(numArgs), C.int(opts), newHandle(c, &fi), C.callbackTrampoline, nil, nil)
+ rv := sqlite3CreateFunction(c.db, cname, C.int(numArgs), C.int(opts), newHandle(c, &fi), C.callbackTrampoline, nil, nil)
if rv != C.SQLITE_OK {
return c.lastError()
}
return nil
}
-func sqlite3_create_function(db *C.sqlite3, zFunctionName *C.char, nArg C.int, eTextRep C.int, pApp uintptr, xFunc unsafe.Pointer, xStep unsafe.Pointer, xFinal unsafe.Pointer) C.int {
+func sqlite3CreateFunction(db *C.sqlite3, zFunctionName *C.char, nArg C.int, eTextRep C.int, pApp uintptr, xFunc unsafe.Pointer, xStep unsafe.Pointer, xFinal unsafe.Pointer) C.int {
return C._sqlite3_create_function(db, zFunctionName, nArg, eTextRep, C.uintptr_t(pApp), (*[0]byte)(unsafe.Pointer(xFunc)), (*[0]byte)(unsafe.Pointer(xStep)), (*[0]byte)(unsafe.Pointer(xFinal)))
}
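
The rename above is internal to the driver; application code keeps going through the exported RegisterFunc method. A minimal sketch (not part of the vendored diff) of registering a deterministic scalar function, using a hypothetical driver name "sqlite3_custom":

    package main

    import (
        "database/sql"
        "log"

        sqlite3 "github.com/mattn/go-sqlite3"
    )

    func main() {
        sql.Register("sqlite3_custom", &sqlite3.SQLiteDriver{
            ConnectHook: func(conn *sqlite3.SQLiteConn) error {
                // pure=true maps to SQLITE_DETERMINISTIC in the create-function call.
                return conn.RegisterFunc("add2", func(a, b int64) int64 { return a + b }, true)
            },
        })

        db, err := sql.Open("sqlite3_custom", ":memory:")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        var n int64
        if err := db.QueryRow("SELECT add2(40, 2)").Scan(&n); err != nil {
            log.Fatal(err)
        }
        log.Println(n) // 42
    }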
@@ -398,11 +399,19 @@ func (c *SQLiteConn) AutoCommit() bool {
return int(C.sqlite3_get_autocommit(c.db)) != 0
}
-func (c *SQLiteConn) lastError() Error {
+func (c *SQLiteConn) lastError() error {
+ return lastError(c.db)
+}
+
+func lastError(db *C.sqlite3) error {
+ rv := C.sqlite3_errcode(db)
+ if rv == C.SQLITE_OK {
+ return nil
+ }
return Error{
- Code: ErrNo(C.sqlite3_errcode(c.db)),
- ExtendedCode: ErrNoExtended(C.sqlite3_extended_errcode(c.db)),
- err: C.GoString(C.sqlite3_errmsg(c.db)),
+ Code: ErrNo(rv),
+ ExtendedCode: ErrNoExtended(C.sqlite3_extended_errcode(db)),
+ err: C.GoString(C.sqlite3_errmsg(db)),
}
}
@@ -430,7 +439,7 @@ func (c *SQLiteConn) exec(ctx context.Context, query string, args []namedValue)
na := s.NumInput()
if len(args) < na {
s.Close()
- return nil, fmt.Errorf("Not enough args to execute query. Expected %d, got %d.", na, len(args))
+ return nil, fmt.Errorf("not enough args to execute query: want %d got %d", na, len(args))
}
for i := 0; i < na; i++ {
args[i].Ordinal -= start
@@ -480,7 +489,7 @@ func (c *SQLiteConn) query(ctx context.Context, query string, args []namedValue)
s.(*SQLiteStmt).cls = true
na := s.NumInput()
if len(args) < na {
- return nil, fmt.Errorf("Not enough args to execute query. Expected %d, got %d.", na, len(args))
+ return nil, fmt.Errorf("not enough args to execute query: want %d got %d", na, len(args))
}
for i := 0; i < na; i++ {
args[i].Ordinal -= start
@@ -532,6 +541,8 @@ func errorString(err Error) string {
// _txlock=XXX
// Specify locking behavior for transactions. XXX can be "immediate",
// "deferred", "exclusive".
+// _foreign_keys=X
+// Enable or disable enforcement of foreign keys. X can be 1 or 0.
func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
if C.sqlite3_threadsafe() == 0 {
return nil, errors.New("sqlite library was not compiled for thread-safe operation")
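
A short usage sketch for the _foreign_keys parameter documented above (the database file name is hypothetical; foreign-key enforcement is normally off by default in SQLite, so the pragma the driver issues is what turns it on for the connection):

    package main

    import (
        "database/sql"
        "log"

        _ "github.com/mattn/go-sqlite3"
    )

    func main() {
        // "_foreign_keys=1" makes the driver run "PRAGMA foreign_keys = ON" on
        // each new connection; "_foreign_keys=0" would disable enforcement.
        db, err := sql.Open("sqlite3", "file:app.db?_foreign_keys=1")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        if err := db.Ping(); err != nil {
            log.Fatal(err)
        }
    }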
@@ -540,6 +551,7 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
var loc *time.Location
txlock := "BEGIN"
busyTimeout := 5000
+ foreignKeys := -1
pos := strings.IndexRune(dsn, '?')
if pos >= 1 {
params, err := url.ParseQuery(dsn[pos+1:])
@@ -582,6 +594,18 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
}
}
+ // _foreign_keys
+ if val := params.Get("_foreign_keys"); val != "" {
+ switch val {
+ case "1":
+ foreignKeys = 1
+ case "0":
+ foreignKeys = 0
+ default:
+ return nil, fmt.Errorf("Invalid _foreign_keys: %v", val)
+ }
+ }
+
if !strings.HasPrefix(dsn, "file:") {
dsn = dsn[:pos]
}
@@ -604,19 +628,43 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
rv = C.sqlite3_busy_timeout(db, C.int(busyTimeout))
if rv != C.SQLITE_OK {
+ C.sqlite3_close_v2(db)
return nil, Error{Code: ErrNo(rv)}
}
+ exec := func(s string) error {
+ cs := C.CString(s)
+ rv := C.sqlite3_exec(db, cs, nil, nil, nil)
+ C.free(unsafe.Pointer(cs))
+ if rv != C.SQLITE_OK {
+ return lastError(db)
+ }
+ return nil
+ }
+ if foreignKeys == 0 {
+ if err := exec("PRAGMA foreign_keys = OFF;"); err != nil {
+ C.sqlite3_close_v2(db)
+ return nil, err
+ }
+ } else if foreignKeys == 1 {
+ if err := exec("PRAGMA foreign_keys = ON;"); err != nil {
+ C.sqlite3_close_v2(db)
+ return nil, err
+ }
+ }
+
conn := &SQLiteConn{db: db, loc: loc, txlock: txlock}
if len(d.Extensions) > 0 {
if err := conn.loadExtensions(d.Extensions); err != nil {
+ conn.Close()
return nil, err
}
}
if d.ConnectHook != nil {
if err := d.ConnectHook(conn); err != nil {
+ conn.Close()
return nil, err
}
}
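
Because database/sql opens driver connections lazily, a failure in this path (an invalid _foreign_keys value, a failing busy-timeout or pragma, a ConnectHook error) is reported on first use rather than by sql.Open itself. A small sketch with a deliberately invalid value, assuming the usual imports and a blank import of the driver:

    func openDemo() {
        // sql.Open only records the DSN; the driver's Open runs on first use.
        db, err := sql.Open("sqlite3", "file:app.db?_foreign_keys=2") // invalid value
        if err != nil {
            log.Fatal(err) // not reached for a DSN problem
        }
        defer db.Close()

        if err := db.Ping(); err != nil {
            log.Println(err) // "Invalid _foreign_keys: 2"
        }
    }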
@@ -626,11 +674,11 @@ func (d *SQLiteDriver) Open(dsn string) (driver.Conn, error) {
// Close the connection.
func (c *SQLiteConn) Close() error {
- deleteHandles(c)
rv := C.sqlite3_close_v2(c.db)
if rv != C.SQLITE_OK {
return c.lastError()
}
+ deleteHandles(c)
c.db = nil
runtime.SetFinalizer(c, nil)
return nil
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_context.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_context.go
new file mode 100644
index 0000000..78ced18
--- /dev/null
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_context.go
@@ -0,0 +1,103 @@
+// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+
+package sqlite3
+
+/*
+
+#ifndef USE_LIBSQLITE3
+#include <sqlite3-binding.h>
+#else
+#include <sqlite3.h>
+#endif
+#include <stdlib.h>
+// These wrappers are necessary because SQLITE_TRANSIENT
+// is a pointer constant, and cgo doesn't translate them correctly.
+
+static inline void my_result_text(sqlite3_context *ctx, char *p, int np) {
+ sqlite3_result_text(ctx, p, np, SQLITE_TRANSIENT);
+}
+
+static inline void my_result_blob(sqlite3_context *ctx, void *p, int np) {
+ sqlite3_result_blob(ctx, p, np, SQLITE_TRANSIENT);
+}
+*/
+import "C"
+
+import (
+ "math"
+ "reflect"
+ "unsafe"
+)
+
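+// i64 is true when the native int is wider than 32 bits; ResultInt and
+// ResultBlob use it to guard against values that do not fit in a C int.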
+const i64 = unsafe.Sizeof(int(0)) > 4
+
+// SQLiteContext corresponds to the C type sqlite3_context.
+type SQLiteContext C.sqlite3_context
+
+// ResultBool sets the result of an SQL function.
+func (c *SQLiteContext) ResultBool(b bool) {
+ if b {
+ c.ResultInt(1)
+ } else {
+ c.ResultInt(0)
+ }
+}
+
+// ResultBlob sets the result of an SQL function.
+// See: sqlite3_result_blob, http://sqlite.org/c3ref/result_blob.html
+func (c *SQLiteContext) ResultBlob(b []byte) {
+ if i64 && len(b) > math.MaxInt32 {
+ C.sqlite3_result_error_toobig((*C.sqlite3_context)(c))
+ return
+ }
+ var p *byte
+ if len(b) > 0 {
+ p = &b[0]
+ }
+ C.my_result_blob((*C.sqlite3_context)(c), unsafe.Pointer(p), C.int(len(b)))
+}
+
+// ResultDouble sets the result of an SQL function.
+// See: sqlite3_result_double, http://sqlite.org/c3ref/result_blob.html
+func (c *SQLiteContext) ResultDouble(d float64) {
+ C.sqlite3_result_double((*C.sqlite3_context)(c), C.double(d))
+}
+
+// ResultInt sets the result of an SQL function.
+// See: sqlite3_result_int, http://sqlite.org/c3ref/result_blob.html
+func (c *SQLiteContext) ResultInt(i int) {
+ if i64 && (i > math.MaxInt32 || i < math.MinInt32) {
+ C.sqlite3_result_int64((*C.sqlite3_context)(c), C.sqlite3_int64(i))
+ } else {
+ C.sqlite3_result_int((*C.sqlite3_context)(c), C.int(i))
+ }
+}
+
+// ResultInt64 sets the result of an SQL function.
+// See: sqlite3_result_int64, http://sqlite.org/c3ref/result_blob.html
+func (c *SQLiteContext) ResultInt64(i int64) {
+ C.sqlite3_result_int64((*C.sqlite3_context)(c), C.sqlite3_int64(i))
+}
+
+// ResultNull sets the result of an SQL function.
+// See: sqlite3_result_null, http://sqlite.org/c3ref/result_blob.html
+func (c *SQLiteContext) ResultNull() {
+ C.sqlite3_result_null((*C.sqlite3_context)(c))
+}
+
+// ResultText sets the result of an SQL function.
+// See: sqlite3_result_text, http://sqlite.org/c3ref/result_blob.html
+func (c *SQLiteContext) ResultText(s string) {
+ h := (*reflect.StringHeader)(unsafe.Pointer(&s))
+ cs, l := (*C.char)(unsafe.Pointer(h.Data)), C.int(h.Len)
+ C.my_result_text((*C.sqlite3_context)(c), cs, l)
+}
+
+// ResultZeroblob sets the result of an SQL function.
+// See: sqlite3_result_zeroblob, http://sqlite.org/c3ref/result_blob.html
+func (c *SQLiteContext) ResultZeroblob(n int) {
+ C.sqlite3_result_zeroblob((*C.sqlite3_context)(c), C.int(n))
+}
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go
index b5bccb1..bb7e25f 100644
--- a/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_load_extension.go
@@ -31,6 +31,7 @@ func (c *SQLiteConn) loadExtensions(extensions []string) error {
defer C.free(unsafe.Pointer(cext))
rv = C.sqlite3_load_extension(c.db, cext, nil, nil)
if rv != C.SQLITE_OK {
+ C.sqlite3_enable_load_extension(c.db, 0)
return errors.New(C.GoString(C.sqlite3_errmsg(c.db)))
}
}
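
Extensions reach this code path through the driver's Extensions field; with the change above, a failed load now also re-disables extension loading on the underlying handle before the error is returned. A hedged sketch (the extension path is hypothetical, and the usual imports plus the sqlite3 package are assumed):

    sql.Register("sqlite3_with_ext", &sqlite3.SQLiteDriver{
        Extensions: []string{"./spellfix1"}, // loaded on every new connection
    })

    db, err := sql.Open("sqlite3_with_ext", "file:app.db")
    if err == nil {
        err = db.Ping() // a load failure from the hunk above is reported here
    }
    if err != nil {
        log.Println(err)
    }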
diff --git a/vendor/github.com/mattn/go-sqlite3/tracecallback.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go
index de1d504..a75f52a 100644
--- a/vendor/github.com/mattn/go-sqlite3/tracecallback.go
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_trace.go
@@ -357,7 +357,7 @@ func (c *SQLiteConn) RegisterAggregator(name string, impl interface{}, pure bool
if pure {
opts |= C.SQLITE_DETERMINISTIC
}
- rv := sqlite3_create_function(c.db, cname, C.int(stepNArgs), C.int(opts), newHandle(c, &ai), nil, C.stepTrampoline, C.doneTrampoline)
+ rv := sqlite3CreateFunction(c.db, cname, C.int(stepNArgs), C.int(opts), newHandle(c, &ai), nil, C.stepTrampoline, C.doneTrampoline)
if rv != C.SQLITE_OK {
return c.lastError()
}
diff --git a/vendor/github.com/mattn/go-sqlite3/sqlite3_vtable.go b/vendor/github.com/mattn/go-sqlite3/sqlite3_vtable.go
new file mode 100644
index 0000000..8bef291
--- /dev/null
+++ b/vendor/github.com/mattn/go-sqlite3/sqlite3_vtable.go
@@ -0,0 +1,646 @@
+// Copyright (C) 2014 Yasuhiro Matsumoto <mattn.jp@gmail.com>.
+//
+// Use of this source code is governed by an MIT-style
+// license that can be found in the LICENSE file.
+// +build vtable
+
+package sqlite3
+
+/*
+#cgo CFLAGS: -std=gnu99
+#cgo CFLAGS: -DSQLITE_ENABLE_RTREE -DSQLITE_THREADSAFE
+#cgo CFLAGS: -DSQLITE_ENABLE_FTS3 -DSQLITE_ENABLE_FTS3_PARENTHESIS -DSQLITE_ENABLE_FTS4_UNICODE61
+#cgo CFLAGS: -DSQLITE_TRACE_SIZE_LIMIT=15
+#cgo CFLAGS: -DSQLITE_ENABLE_COLUMN_METADATA=1
+#cgo CFLAGS: -Wno-deprecated-declarations
+
+#ifndef USE_LIBSQLITE3
+#include <sqlite3-binding.h>
+#else
+#include <sqlite3.h>
+#endif
+#include <stdlib.h>
+#include <stdint.h>
+#include <memory.h>
+
+static inline char *_sqlite3_mprintf(char *zFormat, char *arg) {
+ return sqlite3_mprintf(zFormat, arg);
+}
+
+typedef struct goVTab goVTab;
+
+struct goVTab {
+ sqlite3_vtab base;
+ void *vTab;
+};
+
+uintptr_t goMInit(void *db, void *pAux, int argc, char **argv, char **pzErr, int isCreate);
+
+static int cXInit(sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char **pzErr, int isCreate) {
+ void *vTab = (void *)goMInit(db, pAux, argc, (char**)argv, pzErr, isCreate);
+ if (!vTab || *pzErr) {
+ return SQLITE_ERROR;
+ }
+ goVTab *pvTab = (goVTab *)sqlite3_malloc(sizeof(goVTab));
+ if (!pvTab) {
+ *pzErr = sqlite3_mprintf("%s", "Out of memory");
+ return SQLITE_NOMEM;
+ }
+ memset(pvTab, 0, sizeof(goVTab));
+ pvTab->vTab = vTab;
+
+ *ppVTab = (sqlite3_vtab *)pvTab;
+ *pzErr = 0;
+ return SQLITE_OK;
+}
+
+static inline int cXCreate(sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char **pzErr) {
+ return cXInit(db, pAux, argc, argv, ppVTab, pzErr, 1);
+}
+static inline int cXConnect(sqlite3 *db, void *pAux, int argc, const char *const*argv, sqlite3_vtab **ppVTab, char **pzErr) {
+ return cXInit(db, pAux, argc, argv, ppVTab, pzErr, 0);
+}
+
+char* goVBestIndex(void *pVTab, void *icp);
+
+static inline int cXBestIndex(sqlite3_vtab *pVTab, sqlite3_index_info *info) {
+ char *pzErr = goVBestIndex(((goVTab*)pVTab)->vTab, info);
+ if (pzErr) {
+ if (pVTab->zErrMsg)
+ sqlite3_free(pVTab->zErrMsg);
+ pVTab->zErrMsg = pzErr;
+ return SQLITE_ERROR;
+ }
+ return SQLITE_OK;
+}
+
+char* goVRelease(void *pVTab, int isDestroy);
+
+static int cXRelease(sqlite3_vtab *pVTab, int isDestroy) {
+ char *pzErr = goVRelease(((goVTab*)pVTab)->vTab, isDestroy);
+ if (pzErr) {
+ if (pVTab->zErrMsg)
+ sqlite3_free(pVTab->zErrMsg);
+ pVTab->zErrMsg = pzErr;
+ return SQLITE_ERROR;
+ }
+ if (pVTab->zErrMsg)
+ sqlite3_free(pVTab->zErrMsg);
+ sqlite3_free(pVTab);
+ return SQLITE_OK;
+}
+
+static inline int cXDisconnect(sqlite3_vtab *pVTab) {
+ return cXRelease(pVTab, 0);
+}
+static inline int cXDestroy(sqlite3_vtab *pVTab) {
+ return cXRelease(pVTab, 1);
+}
+
+typedef struct goVTabCursor goVTabCursor;
+
+struct goVTabCursor {
+ sqlite3_vtab_cursor base;
+ void *vTabCursor;
+};
+
+uintptr_t goVOpen(void *pVTab, char **pzErr);
+
+static int cXOpen(sqlite3_vtab *pVTab, sqlite3_vtab_cursor **ppCursor) {
+ void *vTabCursor = (void *)goVOpen(((goVTab*)pVTab)->vTab, &(pVTab->zErrMsg));
+ goVTabCursor *pCursor = (goVTabCursor *)sqlite3_malloc(sizeof(goVTabCursor));
+ if (!pCursor) {
+ return SQLITE_NOMEM;
+ }
+ memset(pCursor, 0, sizeof(goVTabCursor));
+ pCursor->vTabCursor = vTabCursor;
+ *ppCursor = (sqlite3_vtab_cursor *)pCursor;
+ return SQLITE_OK;
+}
+
+static int setErrMsg(sqlite3_vtab_cursor *pCursor, char *pzErr) {
+ if (pCursor->pVtab->zErrMsg)
+ sqlite3_free(pCursor->pVtab->zErrMsg);
+ pCursor->pVtab->zErrMsg = pzErr;
+ return SQLITE_ERROR;
+}
+
+char* goVClose(void *pCursor);
+
+static int cXClose(sqlite3_vtab_cursor *pCursor) {
+ char *pzErr = goVClose(((goVTabCursor*)pCursor)->vTabCursor);
+ if (pzErr) {
+ return setErrMsg(pCursor, pzErr);
+ }
+ sqlite3_free(pCursor);
+ return SQLITE_OK;
+}
+
+char* goVFilter(void *pCursor, int idxNum, char* idxName, int argc, sqlite3_value **argv);
+
+static int cXFilter(sqlite3_vtab_cursor *pCursor, int idxNum, const char *idxStr, int argc, sqlite3_value **argv) {
+ char *pzErr = goVFilter(((goVTabCursor*)pCursor)->vTabCursor, idxNum, (char*)idxStr, argc, argv);
+ if (pzErr) {
+ return setErrMsg(pCursor, pzErr);
+ }
+ return SQLITE_OK;
+}
+
+char* goVNext(void *pCursor);
+
+static int cXNext(sqlite3_vtab_cursor *pCursor) {
+ char *pzErr = goVNext(((goVTabCursor*)pCursor)->vTabCursor);
+ if (pzErr) {
+ return setErrMsg(pCursor, pzErr);
+ }
+ return SQLITE_OK;
+}
+
+int goVEof(void *pCursor);
+
+static inline int cXEof(sqlite3_vtab_cursor *pCursor) {
+ return goVEof(((goVTabCursor*)pCursor)->vTabCursor);
+}
+
+char* goVColumn(void *pCursor, void *cp, int col);
+
+static int cXColumn(sqlite3_vtab_cursor *pCursor, sqlite3_context *ctx, int i) {
+ char *pzErr = goVColumn(((goVTabCursor*)pCursor)->vTabCursor, ctx, i);
+ if (pzErr) {
+ return setErrMsg(pCursor, pzErr);
+ }
+ return SQLITE_OK;
+}
+
+char* goVRowid(void *pCursor, sqlite3_int64 *pRowid);
+
+static int cXRowid(sqlite3_vtab_cursor *pCursor, sqlite3_int64 *pRowid) {
+ char *pzErr = goVRowid(((goVTabCursor*)pCursor)->vTabCursor, pRowid);
+ if (pzErr) {
+ return setErrMsg(pCursor, pzErr);
+ }
+ return SQLITE_OK;
+}
+
+char* goVUpdate(void *pVTab, int argc, sqlite3_value **argv, sqlite3_int64 *pRowid);
+
+static int cXUpdate(sqlite3_vtab *pVTab, int argc, sqlite3_value **argv, sqlite3_int64 *pRowid) {
+ char *pzErr = goVUpdate(((goVTab*)pVTab)->vTab, argc, argv, pRowid);
+ if (pzErr) {
+ if (pVTab->zErrMsg)
+ sqlite3_free(pVTab->zErrMsg);
+ pVTab->zErrMsg = pzErr;
+ return SQLITE_ERROR;
+ }
+ return SQLITE_OK;
+}
+
+static sqlite3_module goModule = {
+ 0, // iVersion
+ cXCreate, // xCreate - create a table
+ cXConnect, // xConnect - connect to an existing table
+ cXBestIndex, // xBestIndex - Determine search strategy
+ cXDisconnect, // xDisconnect - Disconnect from a table
+ cXDestroy, // xDestroy - Drop a table
+ cXOpen, // xOpen - open a cursor
+ cXClose, // xClose - close a cursor
+ cXFilter, // xFilter - configure scan constraints
+ cXNext, // xNext - advance a cursor
+ cXEof, // xEof
+ cXColumn, // xColumn - read data
+ cXRowid, // xRowid - read data
+ cXUpdate, // xUpdate - write data
+// Not implemented
+ 0, // xBegin - begin transaction
+ 0, // xSync - sync transaction
+ 0, // xCommit - commit transaction
+ 0, // xRollback - rollback transaction
+ 0, // xFindFunction - function overloading
+ 0, // xRename - rename the table
+ 0, // xSavepoint
+ 0, // xRelease
+ 0 // xRollbackTo
+};
+
+void goMDestroy(void*);
+
+static int _sqlite3_create_module(sqlite3 *db, const char *zName, uintptr_t pClientData) {
+ return sqlite3_create_module_v2(db, zName, &goModule, (void*) pClientData, goMDestroy);
+}
+*/
+import "C"
+
+import (
+ "fmt"
+ "math"
+ "reflect"
+ "unsafe"
+)
+
+type sqliteModule struct {
+ c *SQLiteConn
+ name string
+ module Module
+}
+
+type sqliteVTab struct {
+ module *sqliteModule
+ vTab VTab
+}
+
+type sqliteVTabCursor struct {
+ vTab *sqliteVTab
+ vTabCursor VTabCursor
+}
+
+// Op is type of operations.
+type Op uint8
+
+// Op values identify the constraint operations passed to BestIndex.
+const (
+ OpEQ Op = 2
+ OpGT = 4
+ OpLE = 8
+ OpLT = 16
+ OpGE = 32
+ OpMATCH = 64
+ OpLIKE = 65 /* 3.10.0 and later only */
+ OpGLOB = 66 /* 3.10.0 and later only */
+ OpREGEXP = 67 /* 3.10.0 and later only */
+ OpScanUnique = 1 /* Scan visits at most 1 row */
+)
+
+// InfoConstraint gives information about a constraint.
+type InfoConstraint struct {
+ Column int
+ Op Op
+ Usable bool
+}
+
+// InfoOrderBy gives information about an ORDER BY clause.
+type InfoOrderBy struct {
+ Column int
+ Desc bool
+}
+
+func constraints(info *C.sqlite3_index_info) []InfoConstraint {
+ l := info.nConstraint
+ slice := (*[1 << 30]C.struct_sqlite3_index_constraint)(unsafe.Pointer(info.aConstraint))[:l:l]
+
+ cst := make([]InfoConstraint, 0, l)
+ for _, c := range slice {
+ var usable bool
+ if c.usable > 0 {
+ usable = true
+ }
+ cst = append(cst, InfoConstraint{
+ Column: int(c.iColumn),
+ Op: Op(c.op),
+ Usable: usable,
+ })
+ }
+ return cst
+}
+
+func orderBys(info *C.sqlite3_index_info) []InfoOrderBy {
+ l := info.nOrderBy
+ slice := (*[1 << 30]C.struct_sqlite3_index_orderby)(unsafe.Pointer(info.aOrderBy))[:l:l]
+
+ ob := make([]InfoOrderBy, 0, l)
+ for _, c := range slice {
+ var desc bool
+ if c.desc > 0 {
+ desc = true
+ }
+ ob = append(ob, InfoOrderBy{
+ Column: int(c.iColumn),
+ Desc: desc,
+ })
+ }
+ return ob
+}
+
+// IndexResult is a Go struct representation of what eventually ends up in the
+// output fields for `sqlite3_index_info`
+// See: https://www.sqlite.org/c3ref/index_info.html
+type IndexResult struct {
+ Used []bool // aConstraintUsage
+ IdxNum int
+ IdxStr string
+ AlreadyOrdered bool // orderByConsumed
+ EstimatedCost float64
+ EstimatedRows float64
+}
+
+// mPrintf is a utility wrapper around sqlite3_mprintf
+func mPrintf(format, arg string) *C.char {
+ cf := C.CString(format)
+ defer C.free(unsafe.Pointer(cf))
+ ca := C.CString(arg)
+ defer C.free(unsafe.Pointer(ca))
+ return C._sqlite3_mprintf(cf, ca)
+}
+
+//export goMInit
+func goMInit(db, pClientData unsafe.Pointer, argc C.int, argv **C.char, pzErr **C.char, isCreate C.int) C.uintptr_t {
+ m := lookupHandle(uintptr(pClientData)).(*sqliteModule)
+ if m.c.db != (*C.sqlite3)(db) {
+ *pzErr = mPrintf("%s", "Inconsistent db handles")
+ return 0
+ }
+ args := make([]string, argc)
+ var A []*C.char
+ slice := reflect.SliceHeader{Data: uintptr(unsafe.Pointer(argv)), Len: int(argc), Cap: int(argc)}
+ a := reflect.NewAt(reflect.TypeOf(A), unsafe.Pointer(&slice)).Elem().Interface()
+ for i, s := range a.([]*C.char) {
+ args[i] = C.GoString(s)
+ }
+ var vTab VTab
+ var err error
+ if isCreate == 1 {
+ vTab, err = m.module.Create(m.c, args)
+ } else {
+ vTab, err = m.module.Connect(m.c, args)
+ }
+
+ if err != nil {
+ *pzErr = mPrintf("%s", err.Error())
+ return 0
+ }
+ vt := sqliteVTab{m, vTab}
+ *pzErr = nil
+ return C.uintptr_t(newHandle(m.c, &vt))
+}
+
+//export goVRelease
+func goVRelease(pVTab unsafe.Pointer, isDestroy C.int) *C.char {
+ vt := lookupHandle(uintptr(pVTab)).(*sqliteVTab)
+ var err error
+ if isDestroy == 1 {
+ err = vt.vTab.Destroy()
+ } else {
+ err = vt.vTab.Disconnect()
+ }
+ if err != nil {
+ return mPrintf("%s", err.Error())
+ }
+ return nil
+}
+
+//export goVOpen
+func goVOpen(pVTab unsafe.Pointer, pzErr **C.char) C.uintptr_t {
+ vt := lookupHandle(uintptr(pVTab)).(*sqliteVTab)
+ vTabCursor, err := vt.vTab.Open()
+ if err != nil {
+ *pzErr = mPrintf("%s", err.Error())
+ return 0
+ }
+ vtc := sqliteVTabCursor{vt, vTabCursor}
+ *pzErr = nil
+ return C.uintptr_t(newHandle(vt.module.c, &vtc))
+}
+
+//export goVBestIndex
+func goVBestIndex(pVTab unsafe.Pointer, icp unsafe.Pointer) *C.char {
+ vt := lookupHandle(uintptr(pVTab)).(*sqliteVTab)
+ info := (*C.sqlite3_index_info)(icp)
+ csts := constraints(info)
+ res, err := vt.vTab.BestIndex(csts, orderBys(info))
+ if err != nil {
+ return mPrintf("%s", err.Error())
+ }
+ if len(res.Used) != len(csts) {
+ return mPrintf("Result.Used != expected value", "")
+ }
+
+ // Get a pointer to constraint_usage struct so we can update in place.
+ l := info.nConstraint
+ s := (*[1 << 30]C.struct_sqlite3_index_constraint_usage)(unsafe.Pointer(info.aConstraintUsage))[:l:l]
+ index := 1
+ for i := C.int(0); i < info.nConstraint; i++ {
+ if res.Used[i] {
+ s[i].argvIndex = C.int(index)
+ s[i].omit = C.uchar(1)
+ index++
+ }
+ }
+
+ info.idxNum = C.int(res.IdxNum)
+ idxStr := C.CString(res.IdxStr)
+ defer C.free(unsafe.Pointer(idxStr))
+ info.idxStr = idxStr
+ info.needToFreeIdxStr = C.int(0)
+ if res.AlreadyOrdered {
+ info.orderByConsumed = C.int(1)
+ }
+ info.estimatedCost = C.double(res.EstimatedCost)
+ info.estimatedRows = C.sqlite3_int64(res.EstimatedRows)
+
+ return nil
+}
+
+//export goVClose
+func goVClose(pCursor unsafe.Pointer) *C.char {
+ vtc := lookupHandle(uintptr(pCursor)).(*sqliteVTabCursor)
+ err := vtc.vTabCursor.Close()
+ if err != nil {
+ return mPrintf("%s", err.Error())
+ }
+ return nil
+}
+
+//export goMDestroy
+func goMDestroy(pClientData unsafe.Pointer) {
+ m := lookupHandle(uintptr(pClientData)).(*sqliteModule)
+ m.module.DestroyModule()
+}
+
+//export goVFilter
+func goVFilter(pCursor unsafe.Pointer, idxNum C.int, idxName *C.char, argc C.int, argv **C.sqlite3_value) *C.char {
+ vtc := lookupHandle(uintptr(pCursor)).(*sqliteVTabCursor)
+ args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
+ vals := make([]interface{}, 0, argc)
+ for _, v := range args {
+ conv, err := callbackArgGeneric(v)
+ if err != nil {
+ return mPrintf("%s", err.Error())
+ }
+ vals = append(vals, conv.Interface())
+ }
+ err := vtc.vTabCursor.Filter(int(idxNum), C.GoString(idxName), vals)
+ if err != nil {
+ return mPrintf("%s", err.Error())
+ }
+ return nil
+}
+
+//export goVNext
+func goVNext(pCursor unsafe.Pointer) *C.char {
+ vtc := lookupHandle(uintptr(pCursor)).(*sqliteVTabCursor)
+ err := vtc.vTabCursor.Next()
+ if err != nil {
+ return mPrintf("%s", err.Error())
+ }
+ return nil
+}
+
+//export goVEof
+func goVEof(pCursor unsafe.Pointer) C.int {
+ vtc := lookupHandle(uintptr(pCursor)).(*sqliteVTabCursor)
+ err := vtc.vTabCursor.EOF()
+ if err {
+ return 1
+ }
+ return 0
+}
+
+//export goVColumn
+func goVColumn(pCursor, cp unsafe.Pointer, col C.int) *C.char {
+ vtc := lookupHandle(uintptr(pCursor)).(*sqliteVTabCursor)
+ c := (*SQLiteContext)(cp)
+ err := vtc.vTabCursor.Column(c, int(col))
+ if err != nil {
+ return mPrintf("%s", err.Error())
+ }
+ return nil
+}
+
+//export goVRowid
+func goVRowid(pCursor unsafe.Pointer, pRowid *C.sqlite3_int64) *C.char {
+ vtc := lookupHandle(uintptr(pCursor)).(*sqliteVTabCursor)
+ rowid, err := vtc.vTabCursor.Rowid()
+ if err != nil {
+ return mPrintf("%s", err.Error())
+ }
+ *pRowid = C.sqlite3_int64(rowid)
+ return nil
+}
+
+//export goVUpdate
+func goVUpdate(pVTab unsafe.Pointer, argc C.int, argv **C.sqlite3_value, pRowid *C.sqlite3_int64) *C.char {
+ vt := lookupHandle(uintptr(pVTab)).(*sqliteVTab)
+
+ var tname string
+ if n, ok := vt.vTab.(interface {
+ TableName() string
+ }); ok {
+ tname = n.TableName() + " "
+ }
+
+ err := fmt.Errorf("virtual %s table %sis read-only", vt.module.name, tname)
+ if v, ok := vt.vTab.(VTabUpdater); ok {
+ // convert argv
+ args := (*[(math.MaxInt32 - 1) / unsafe.Sizeof((*C.sqlite3_value)(nil))]*C.sqlite3_value)(unsafe.Pointer(argv))[:argc:argc]
+ vals := make([]interface{}, 0, argc)
+ for _, v := range args {
+ conv, err := callbackArgGeneric(v)
+ if err != nil {
+ return mPrintf("%s", err.Error())
+ }
+
+ // workaround for SQLITE_NULL
+ x := conv.Interface()
+ if z, ok := x.([]byte); ok && z == nil {
+ x = nil
+ }
+
+ vals = append(vals, x)
+ }
+
+ switch {
+ case argc == 1:
+ err = v.Delete(vals[0])
+
+ case argc > 1 && vals[0] == nil:
+ var id int64
+ id, err = v.Insert(vals[1], vals[2:])
+ if err == nil {
+ *pRowid = C.sqlite3_int64(id)
+ }
+
+ case argc > 1:
+ err = v.Update(vals[1], vals[2:])
+ }
+ }
+
+ if err != nil {
+ return mPrintf("%s", err.Error())
+ }
+
+ return nil
+}
+
+// Module is a "virtual table module"; it defines the implementation of
+// virtual tables. See: http://sqlite.org/c3ref/module.html
+type Module interface {
+ // http://sqlite.org/vtab.html#xcreate
+ Create(c *SQLiteConn, args []string) (VTab, error)
+ // http://sqlite.org/vtab.html#xconnect
+ Connect(c *SQLiteConn, args []string) (VTab, error)
+ // http://sqlite.org/c3ref/create_module.html
+ DestroyModule()
+}
+
+// VTab describes a particular instance of the virtual table.
+// See: http://sqlite.org/c3ref/vtab.html
+type VTab interface {
+ // http://sqlite.org/vtab.html#xbestindex
+ BestIndex([]InfoConstraint, []InfoOrderBy) (*IndexResult, error)
+ // http://sqlite.org/vtab.html#xdisconnect
+ Disconnect() error
+ // http://sqlite.org/vtab.html#sqlite3_module.xDestroy
+ Destroy() error
+ // http://sqlite.org/vtab.html#xopen
+ Open() (VTabCursor, error)
+}
+
+// VTabUpdater is a type that allows a VTab to be inserted, updated, or
+// deleted.
+// See: https://sqlite.org/vtab.html#xupdate
+type VTabUpdater interface {
+ Delete(interface{}) error
+ Insert(interface{}, []interface{}) (int64, error)
+ Update(interface{}, []interface{}) error
+}
+
+// VTabCursor describes cursors that point into the virtual table and are used
+// to loop through the virtual table. See: http://sqlite.org/c3ref/vtab_cursor.html
+type VTabCursor interface {
+ // http://sqlite.org/vtab.html#xclose
+ Close() error
+ // http://sqlite.org/vtab.html#xfilter
+ Filter(idxNum int, idxStr string, vals []interface{}) error
+ // http://sqlite.org/vtab.html#xnext
+ Next() error
+ // http://sqlite.org/vtab.html#xeof
+ EOF() bool
+ // http://sqlite.org/vtab.html#xcolumn
+ Column(c *SQLiteContext, col int) error
+ // http://sqlite.org/vtab.html#xrowid
+ Rowid() (int64, error)
+}
+
+// DeclareVTab declares the Schema of a virtual table.
+// See: http://sqlite.org/c3ref/declare_vtab.html
+func (c *SQLiteConn) DeclareVTab(sql string) error {
+ zSQL := C.CString(sql)
+ defer C.free(unsafe.Pointer(zSQL))
+ rv := C.sqlite3_declare_vtab(c.db, zSQL)
+ if rv != C.SQLITE_OK {
+ return c.lastError()
+ }
+ return nil
+}
+
+// CreateModule registers a virtual table implementation.
+// See: http://sqlite.org/c3ref/create_module.html
+func (c *SQLiteConn) CreateModule(moduleName string, module Module) error {
+ mname := C.CString(moduleName)
+ defer C.free(unsafe.Pointer(mname))
+ udm := sqliteModule{c, moduleName, module}
+ rv := C._sqlite3_create_module(c.db, mname, C.uintptr_t(newHandle(c, &udm)))
+ if rv != C.SQLITE_OK {
+ return c.lastError()
+ }
+ return nil
+}
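For orientation, here is a minimal sketch of how these new interfaces fit together: a Module creates a VTab, the VTab opens a VTabCursor, and the module is registered through the driver's ConnectHook. It assumes the package is built with the vtable build tag; the wordsModule/wordsTable/wordsCursor names and the declared schema are purely illustrative, not part of the library.

    package main

    import (
        "database/sql"

        "github.com/mattn/go-sqlite3"
    )

    // wordsModule exposes a fixed []string as a one-column virtual table.
    type wordsModule struct{ words []string }

    func (m *wordsModule) Create(c *sqlite3.SQLiteConn, args []string) (sqlite3.VTab, error) {
        // Tell SQLite what schema this virtual table presents.
        if err := c.DeclareVTab("CREATE TABLE x(word TEXT)"); err != nil {
            return nil, err
        }
        return &wordsTable{m.words}, nil
    }
    func (m *wordsModule) Connect(c *sqlite3.SQLiteConn, args []string) (sqlite3.VTab, error) {
        return m.Create(c, args)
    }
    func (m *wordsModule) DestroyModule() {}

    type wordsTable struct{ words []string }

    func (t *wordsTable) BestIndex(cst []sqlite3.InfoConstraint, ob []sqlite3.InfoOrderBy) (*sqlite3.IndexResult, error) {
        // Used must have one entry per constraint; a plain full scan ignores them all.
        return &sqlite3.IndexResult{Used: make([]bool, len(cst))}, nil
    }
    func (t *wordsTable) Disconnect() error                 { return nil }
    func (t *wordsTable) Destroy() error                    { return nil }
    func (t *wordsTable) Open() (sqlite3.VTabCursor, error) { return &wordsCursor{words: t.words}, nil }

    type wordsCursor struct {
        words []string
        index int
    }

    func (c *wordsCursor) Close() error { return nil }
    func (c *wordsCursor) Filter(idxNum int, idxStr string, vals []interface{}) error {
        c.index = 0
        return nil
    }
    func (c *wordsCursor) Next() error           { c.index++; return nil }
    func (c *wordsCursor) EOF() bool             { return c.index >= len(c.words) }
    func (c *wordsCursor) Rowid() (int64, error) { return int64(c.index), nil }
    func (c *wordsCursor) Column(ctx *sqlite3.SQLiteContext, col int) error {
        ctx.ResultText(c.words[c.index]) // uses the SQLiteContext result setters added above
        return nil
    }

    func main() {
        sql.Register("sqlite3_vtab", &sqlite3.SQLiteDriver{
            ConnectHook: func(conn *sqlite3.SQLiteConn) error {
                return conn.CreateModule("words", &wordsModule{words: []string{"hello", "world"}})
            },
        })
        db, _ := sql.Open("sqlite3_vtab", ":memory:")
        defer db.Close()
        db.Exec("CREATE VIRTUAL TABLE vt USING words()")
        // SELECT word FROM vt now iterates the Go slice through the cursor.
    }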
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
index 1595982..6dee0ef 100644
--- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go
+++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go
@@ -69,6 +69,9 @@ type DecoderConfig struct {
// - empty array = empty map and vice versa
// - negative numbers to overflowed uint values (base 10)
// - slice of maps to a merged map
+ // - single values are converted to slices if required. Each
+ // element is weakly decoded. For example: "4" can become []int{4}
+ // if the target type is an int slice.
//
WeaklyTypedInput bool
@@ -584,17 +587,28 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
valSlice := val
if valSlice.IsNil() || d.config.ZeroFields {
-
// Check input type
if dataValKind != reflect.Array && dataValKind != reflect.Slice {
- // Accept empty map instead of array/slice in weakly typed mode
- if d.config.WeaklyTypedInput && dataVal.Kind() == reflect.Map && dataVal.Len() == 0 {
- val.Set(reflect.MakeSlice(sliceType, 0, 0))
- return nil
- } else {
- return fmt.Errorf(
- "'%s': source data must be an array or slice, got %s", name, dataValKind)
+ if d.config.WeaklyTypedInput {
+ switch {
+ // Empty maps turn into empty slices
+ case dataValKind == reflect.Map:
+ if dataVal.Len() == 0 {
+ val.Set(reflect.MakeSlice(sliceType, 0, 0))
+ return nil
+ }
+
+ // All other types we try to convert to the slice type
+ // and "lift" it into it. i.e. a string becomes a string slice.
+ default:
+ // Just re-try this function with data as a slice.
+ return d.decodeSlice(name, []interface{}{data}, val)
+ }
}
+
+ return fmt.Errorf(
+ "'%s': source data must be an array or slice, got %s", name, dataValKind)
+
}
// Make a new slice to hold our result, same size as the original data.
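The weakly-typed lifting added above can be exercised with a small sketch (the Config struct and field names are illustrative, not part of the package):

    package main

    import (
        "fmt"

        "github.com/mitchellh/mapstructure"
    )

    type Config struct {
        Ports []int
    }

    func main() {
        var out Config
        dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
            WeaklyTypedInput: true,
            Result:           &out,
        })
        if err != nil {
            panic(err)
        }
        // The single string "4" is lifted into a slice and each element is
        // weakly decoded, so Ports ends up as []int{4}.
        if err := dec.Decode(map[string]interface{}{"ports": "4"}); err != nil {
            panic(err)
        }
        fmt.Println(out.Ports)
    }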
diff --git a/vendor/github.com/nsheridan/autocert-wkfs-cache/cache.go b/vendor/github.com/nsheridan/autocert-wkfs-cache/cache.go
index e829ef2..3b3fd47 100644
--- a/vendor/github.com/nsheridan/autocert-wkfs-cache/cache.go
+++ b/vendor/github.com/nsheridan/autocert-wkfs-cache/cache.go
@@ -1,13 +1,13 @@
package wkfscache
import (
+ "context"
"os"
"path/filepath"
"go4.org/wkfs"
"golang.org/x/crypto/acme/autocert"
- "golang.org/x/net/context"
)
type Cache string
diff --git a/vendor/github.com/pelletier/go-buffruneio/buffruneio.go b/vendor/github.com/pelletier/go-buffruneio/buffruneio.go
index 41cab87..4e6d6ea 100644
--- a/vendor/github.com/pelletier/go-buffruneio/buffruneio.go
+++ b/vendor/github.com/pelletier/go-buffruneio/buffruneio.go
@@ -31,8 +31,13 @@ func NewReader(rd io.Reader) *Reader {
}
}
+type runeWithSize struct {
+ r rune
+ size int
+}
+
func (rd *Reader) feedBuffer() error {
- r, _, err := rd.input.ReadRune()
+ r, size, err := rd.input.ReadRune()
if err != nil {
if err != io.EOF {
@@ -41,7 +46,9 @@ func (rd *Reader) feedBuffer() error {
r = EOF
}
- rd.buffer.PushBack(r)
+ newRuneWithSize := runeWithSize{r, size}
+
+ rd.buffer.PushBack(newRuneWithSize)
if rd.current == nil {
rd.current = rd.buffer.Back()
}
@@ -49,17 +56,17 @@ func (rd *Reader) feedBuffer() error {
}
// ReadRune reads the next rune from buffer, or from the underlying reader if needed.
-func (rd *Reader) ReadRune() (rune, error) {
+func (rd *Reader) ReadRune() (rune, int, error) {
if rd.current == rd.buffer.Back() || rd.current == nil {
err := rd.feedBuffer()
if err != nil {
- return EOF, err
+ return EOF, 0, err
}
}
- r := rd.current.Value
+ runeWithSize := rd.current.Value.(runeWithSize)
rd.current = rd.current.Next()
- return r.(rune), nil
+ return runeWithSize.r, runeWithSize.size, nil
}
// UnreadRune pushes back the previously read rune in the buffer, extending it if needed.
@@ -84,9 +91,9 @@ func (rd *Reader) Forget() {
}
}
-// Peek returns at most the next n runes, reading from the uderlying source if
+// PeekRunes returns at most the next n runes, reading from the underlying source if
// needed. Does not move the current index. It includes EOF if reached.
-func (rd *Reader) Peek(n int) []rune {
+func (rd *Reader) PeekRunes(n int) []rune {
res := make([]rune, 0, n)
cursor := rd.current
for i := 0; i < n; i++ {
@@ -98,7 +105,7 @@ func (rd *Reader) Peek(n int) []rune {
cursor = rd.buffer.Back()
}
if cursor != nil {
- r := cursor.Value.(rune)
+ r := cursor.Value.(runeWithSize).r
res = append(res, r)
if r == EOF {
return res
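A short sketch of the changed Reader API, assuming the package name buffruneio and the import path shown in this diff: ReadRune now also reports the rune's byte size, and Peek has become PeekRunes.

    package main

    import (
        "fmt"
        "strings"

        "github.com/pelletier/go-buffruneio"
    )

    func main() {
        rd := buffruneio.NewReader(strings.NewReader("héllo"))
        r, size, err := rd.ReadRune() // 'h', reported as 1 byte
        if err != nil {
            panic(err)
        }
        fmt.Println(string(r), size)
        // PeekRunes looks ahead without moving the cursor.
        fmt.Println(string(rd.PeekRunes(2))) // "él"
    }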
diff --git a/vendor/github.com/pelletier/go-toml/lexer.go b/vendor/github.com/pelletier/go-toml/lexer.go
index 4ba134c..104f3b1 100644
--- a/vendor/github.com/pelletier/go-toml/lexer.go
+++ b/vendor/github.com/pelletier/go-toml/lexer.go
@@ -36,7 +36,7 @@ type tomlLexer struct {
// Basic read operations on input
func (l *tomlLexer) read() rune {
- r, err := l.input.ReadRune()
+ r, _, err := l.input.ReadRune()
if err != nil {
panic(err)
}
@@ -89,7 +89,7 @@ func (l *tomlLexer) emit(t tokenType) {
}
func (l *tomlLexer) peek() rune {
- r, err := l.input.ReadRune()
+ r, _, err := l.input.ReadRune()
if err != nil {
panic(err)
}
@@ -99,7 +99,7 @@ func (l *tomlLexer) peek() rune {
func (l *tomlLexer) follow(next string) bool {
for _, expectedRune := range next {
- r, err := l.input.ReadRune()
+ r, _, err := l.input.ReadRune()
defer l.input.UnreadRune()
if err != nil {
panic(err)
@@ -219,7 +219,7 @@ func (l *tomlLexer) lexRvalue() tomlLexStateFn {
break
}
- possibleDate := string(l.input.Peek(35))
+ possibleDate := string(l.input.PeekRunes(35))
dateMatch := dateRegexp.FindString(possibleDate)
if dateMatch != "" {
l.fastForward(len(dateMatch))
diff --git a/vendor/github.com/pelletier/go-toml/marshal.go b/vendor/github.com/pelletier/go-toml/marshal.go
new file mode 100644
index 0000000..a1d7010
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal.go
@@ -0,0 +1,479 @@
+package toml
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+)
+
+/*
+TomlTree structural types and corresponding marshal types
+-------------------------------------------------------------------------------
+*TomlTree (*)struct, (*)map[string]interface{}
+[]*TomlTree (*)[](*)struct, (*)[](*)map[string]interface{}
+[]interface{} (as interface{}) (*)[]primitive, (*)[]([]interface{})
+interface{} (*)primitive
+
+TomlTree primitive types and corresponding marshal types
+-----------------------------------------------------------
+uint64 uint, uint8-uint64, pointers to same
+int64 int, int8-uint64, pointers to same
+float64 float32, float64, pointers to same
+string string, pointers to same
+bool bool, pointers to same
+time.Time time.Time{}, pointers to same
+*/
+
+type tomlOpts struct {
+ name string
+ include bool
+ omitempty bool
+}
+
+var timeType = reflect.TypeOf(time.Time{})
+var marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
+
+// Check if the given marshall type maps to a TomlTree primitive
+func isPrimitive(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isPrimitive(mtype.Elem())
+ case reflect.Bool:
+ return true
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return true
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return true
+ case reflect.Float32, reflect.Float64:
+ return true
+ case reflect.String:
+ return true
+ case reflect.Struct:
+ return mtype == timeType || isCustomMarshaler(mtype)
+ default:
+ return false
+ }
+}
+
+// Check if the given marshall type maps to a TomlTree slice
+func isTreeSlice(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Slice:
+ return !isOtherSlice(mtype)
+ default:
+ return false
+ }
+}
+
+// Check if the given marshall type maps to a non-TomlTree slice
+func isOtherSlice(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Ptr:
+ return isOtherSlice(mtype.Elem())
+ case reflect.Slice:
+ return isPrimitive(mtype.Elem()) || isOtherSlice(mtype.Elem())
+ default:
+ return false
+ }
+}
+
+// Check if the given marshall type maps to a TomlTree
+func isTree(mtype reflect.Type) bool {
+ switch mtype.Kind() {
+ case reflect.Map:
+ return true
+ case reflect.Struct:
+ return !isPrimitive(mtype)
+ default:
+ return false
+ }
+}
+
+func isCustomMarshaler(mtype reflect.Type) bool {
+ return mtype.Implements(marshalerType)
+}
+
+func callCustomMarshaler(mval reflect.Value) ([]byte, error) {
+ return mval.Interface().(Marshaler).MarshalTOML()
+}
+
+// Marshaler is the interface implemented by types that
+// can marshal themselves into valid TOML.
+type Marshaler interface {
+ MarshalTOML() ([]byte, error)
+}
+
+/*
+Marshal returns the TOML encoding of v. Behavior is similar to the Go json
+encoder, except that there is no concept of a Marshaler interface or MarshalTOML
+function for sub-structs, and currently only definite types can be marshaled
+(i.e. no `interface{}`).
+
+Note that pointers are automatically assigned the "omitempty" option, as TOML
+explicitly does not handle null values (saying instead the label should be
+dropped).
+*/
+func Marshal(v interface{}) ([]byte, error) {
+ mtype := reflect.TypeOf(v)
+ if mtype.Kind() != reflect.Struct {
+ return []byte{}, errors.New("Only a struct can be marshaled to TOML")
+ }
+ sval := reflect.ValueOf(v)
+ if isCustomMarshaler(mtype) {
+ return callCustomMarshaler(sval)
+ }
+ t, err := valueToTree(mtype, sval)
+ if err != nil {
+ return []byte{}, err
+ }
+ s, err := t.ToTomlString()
+ return []byte(s), err
+}
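A brief, illustrative sketch of Marshal as documented above (the Server struct and its tags are assumptions for the example, not part of the package):

    package main

    import (
        "fmt"

        "github.com/pelletier/go-toml"
    )

    type Server struct {
        Name    string  `toml:"name"`
        Port    int     `toml:"port"`
        Comment string  `toml:"-"`     // excluded from the output
        Owner   *string `toml:"owner"` // pointers are implicitly omitempty
    }

    func main() {
        out, err := toml.Marshal(Server{Name: "web", Port: 8080})
        if err != nil {
            panic(err)
        }
        fmt.Print(string(out))
        // name = "web"
        // port = 8080
    }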
+
+// Convert given marshal struct or map value to toml tree
+func valueToTree(mtype reflect.Type, mval reflect.Value) (*TomlTree, error) {
+ if mtype.Kind() == reflect.Ptr {
+ return valueToTree(mtype.Elem(), mval.Elem())
+ }
+ tval := newTomlTree()
+ switch mtype.Kind() {
+ case reflect.Struct:
+ for i := 0; i < mtype.NumField(); i++ {
+ mtypef, mvalf := mtype.Field(i), mval.Field(i)
+ opts := tomlOptions(mtypef)
+ if opts.include && (!opts.omitempty || !isZero(mvalf)) {
+ val, err := valueToToml(mtypef.Type, mvalf)
+ if err != nil {
+ return nil, err
+ }
+ tval.Set(opts.name, val)
+ }
+ }
+ case reflect.Map:
+ for _, key := range mval.MapKeys() {
+ mvalf := mval.MapIndex(key)
+ val, err := valueToToml(mtype.Elem(), mvalf)
+ if err != nil {
+ return nil, err
+ }
+ tval.Set(key.String(), val)
+ }
+ }
+ return tval, nil
+}
+
+// Convert given marshal slice to slice of Toml trees
+func valueToTreeSlice(mtype reflect.Type, mval reflect.Value) ([]*TomlTree, error) {
+ tval := make([]*TomlTree, mval.Len(), mval.Len())
+ for i := 0; i < mval.Len(); i++ {
+ val, err := valueToTree(mtype.Elem(), mval.Index(i))
+ if err != nil {
+ return nil, err
+ }
+ tval[i] = val
+ }
+ return tval, nil
+}
+
+// Convert given marshal slice to slice of toml values
+func valueToOtherSlice(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+ tval := make([]interface{}, mval.Len(), mval.Len())
+ for i := 0; i < mval.Len(); i++ {
+ val, err := valueToToml(mtype.Elem(), mval.Index(i))
+ if err != nil {
+ return nil, err
+ }
+ tval[i] = val
+ }
+ return tval, nil
+}
+
+// Convert given marshal value to toml value
+func valueToToml(mtype reflect.Type, mval reflect.Value) (interface{}, error) {
+ if mtype.Kind() == reflect.Ptr {
+ return valueToToml(mtype.Elem(), mval.Elem())
+ }
+ switch {
+ case isCustomMarshaler(mtype):
+ return callCustomMarshaler(mval)
+ case isTree(mtype):
+ return valueToTree(mtype, mval)
+ case isTreeSlice(mtype):
+ return valueToTreeSlice(mtype, mval)
+ case isOtherSlice(mtype):
+ return valueToOtherSlice(mtype, mval)
+ default:
+ switch mtype.Kind() {
+ case reflect.Bool:
+ return mval.Bool(), nil
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return mval.Int(), nil
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ return mval.Uint(), nil
+ case reflect.Float32, reflect.Float64:
+ return mval.Float(), nil
+ case reflect.String:
+ return mval.String(), nil
+ case reflect.Struct:
+ return mval.Interface().(time.Time), nil
+ default:
+ return nil, fmt.Errorf("Marshal can't handle %v(%v)", mtype, mtype.Kind())
+ }
+ }
+}
+
+/*
+Unmarshal parses the TOML-encoded data and stores the result in the value
+pointed to by v. Behavior is similar to the Go json encoder, except that there
+is no concept of an Unmarshaler interface or UnmarshalTOML function for
+sub-structs, and currently only definite types can be unmarshaled to (i.e. no
+`interface{}`).
+*/
+func Unmarshal(data []byte, v interface{}) error {
+ mtype := reflect.TypeOf(v)
+ if mtype.Kind() != reflect.Ptr || mtype.Elem().Kind() != reflect.Struct {
+ return errors.New("Only a pointer to struct can be unmarshaled from TOML")
+ }
+
+ t, err := Load(string(data))
+ if err != nil {
+ return err
+ }
+
+ sval, err := valueFromTree(mtype.Elem(), t)
+ if err != nil {
+ return err
+ }
+ reflect.ValueOf(v).Elem().Set(sval)
+ return nil
+}
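And the inverse direction, again with an illustrative struct: Unmarshal requires a pointer to a struct and fills the tagged fields from the parsed tree.

    package main

    import (
        "fmt"

        "github.com/pelletier/go-toml"
    )

    type Server struct {
        Name string `toml:"name"`
        Port int    `toml:"port"`
    }

    func main() {
        doc := []byte("name = \"web\"\nport = 8080\n")
        var s Server
        if err := toml.Unmarshal(doc, &s); err != nil {
            panic(err)
        }
        fmt.Println(s.Name, s.Port) // web 8080
    }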
+
+// Convert toml tree to marshal struct or map, using marshal type
+func valueFromTree(mtype reflect.Type, tval *TomlTree) (reflect.Value, error) {
+ if mtype.Kind() == reflect.Ptr {
+ return unwrapPointer(mtype, tval)
+ }
+ var mval reflect.Value
+ switch mtype.Kind() {
+ case reflect.Struct:
+ mval = reflect.New(mtype).Elem()
+ for i := 0; i < mtype.NumField(); i++ {
+ mtypef := mtype.Field(i)
+ opts := tomlOptions(mtypef)
+ if opts.include {
+ key := opts.name
+ exists := tval.Has(key)
+ if exists {
+ val := tval.Get(key)
+ mvalf, err := valueFromToml(mtypef.Type, val)
+ if err != nil {
+ return mval, formatError(err, tval.GetPosition(key))
+ }
+ mval.Field(i).Set(mvalf)
+ }
+ }
+ }
+ case reflect.Map:
+ mval = reflect.MakeMap(mtype)
+ for _, key := range tval.Keys() {
+ val := tval.Get(key)
+ mvalf, err := valueFromToml(mtype.Elem(), val)
+ if err != nil {
+ return mval, formatError(err, tval.GetPosition(key))
+ }
+ mval.SetMapIndex(reflect.ValueOf(key), mvalf)
+ }
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal struct/map slice, using marshal type
+func valueFromTreeSlice(mtype reflect.Type, tval []*TomlTree) (reflect.Value, error) {
+ mval := reflect.MakeSlice(mtype, len(tval), len(tval))
+ for i := 0; i < len(tval); i++ {
+ val, err := valueFromTree(mtype.Elem(), tval[i])
+ if err != nil {
+ return mval, err
+ }
+ mval.Index(i).Set(val)
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal primitive slice, using marshal type
+func valueFromOtherSlice(mtype reflect.Type, tval []interface{}) (reflect.Value, error) {
+ mval := reflect.MakeSlice(mtype, len(tval), len(tval))
+ for i := 0; i < len(tval); i++ {
+ val, err := valueFromToml(mtype.Elem(), tval[i])
+ if err != nil {
+ return mval, err
+ }
+ mval.Index(i).Set(val)
+ }
+ return mval, nil
+}
+
+// Convert toml value to marshal value, using marshal type
+func valueFromToml(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
+ if mtype.Kind() == reflect.Ptr {
+ return unwrapPointer(mtype, tval)
+ }
+ switch {
+ case isTree(mtype):
+ return valueFromTree(mtype, tval.(*TomlTree))
+ case isTreeSlice(mtype):
+ return valueFromTreeSlice(mtype, tval.([]*TomlTree))
+ case isOtherSlice(mtype):
+ return valueFromOtherSlice(mtype, tval.([]interface{}))
+ default:
+ switch mtype.Kind() {
+ case reflect.Bool:
+ val, ok := tval.(bool)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to bool", tval, tval)
+ }
+ return reflect.ValueOf(val), nil
+ case reflect.Int:
+ val, ok := tval.(int64)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+ }
+ return reflect.ValueOf(int(val)), nil
+ case reflect.Int8:
+ val, ok := tval.(int64)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+ }
+ return reflect.ValueOf(int8(val)), nil
+ case reflect.Int16:
+ val, ok := tval.(int64)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+ }
+ return reflect.ValueOf(int16(val)), nil
+ case reflect.Int32:
+ val, ok := tval.(int64)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+ }
+ return reflect.ValueOf(int32(val)), nil
+ case reflect.Int64:
+ val, ok := tval.(int64)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to int", tval, tval)
+ }
+ return reflect.ValueOf(val), nil
+ case reflect.Uint:
+ val, ok := tval.(int64)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+ }
+ return reflect.ValueOf(uint(val)), nil
+ case reflect.Uint8:
+ val, ok := tval.(int64)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+ }
+ return reflect.ValueOf(uint8(val)), nil
+ case reflect.Uint16:
+ val, ok := tval.(int64)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+ }
+ return reflect.ValueOf(uint16(val)), nil
+ case reflect.Uint32:
+ val, ok := tval.(int64)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+ }
+ return reflect.ValueOf(uint32(val)), nil
+ case reflect.Uint64:
+ val, ok := tval.(int64)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to uint", tval, tval)
+ }
+ return reflect.ValueOf(uint64(val)), nil
+ case reflect.Float32:
+ val, ok := tval.(float64)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to float", tval, tval)
+ }
+ return reflect.ValueOf(float32(val)), nil
+ case reflect.Float64:
+ val, ok := tval.(float64)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to float", tval, tval)
+ }
+ return reflect.ValueOf(val), nil
+ case reflect.String:
+ val, ok := tval.(string)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to string", tval, tval)
+ }
+ return reflect.ValueOf(val), nil
+ case reflect.Struct:
+ val, ok := tval.(time.Time)
+ if !ok {
+ return reflect.ValueOf(nil), fmt.Errorf("Can't convert %v(%T) to time", tval, tval)
+ }
+ return reflect.ValueOf(val), nil
+ default:
+ return reflect.ValueOf(nil), fmt.Errorf("Unmarshal can't handle %v(%v)", mtype, mtype.Kind())
+ }
+ }
+}
+
+func unwrapPointer(mtype reflect.Type, tval interface{}) (reflect.Value, error) {
+ val, err := valueFromToml(mtype.Elem(), tval)
+ if err != nil {
+ return reflect.ValueOf(nil), err
+ }
+ mval := reflect.New(mtype.Elem())
+ mval.Elem().Set(val)
+ return mval, nil
+}
+
+func tomlOptions(vf reflect.StructField) tomlOpts {
+ tag := vf.Tag.Get("toml")
+ parse := strings.Split(tag, ",")
+ result := tomlOpts{vf.Name, true, false}
+ if parse[0] != "" {
+ if parse[0] == "-" && len(parse) == 1 {
+ result.include = false
+ } else {
+ result.name = strings.Trim(parse[0], " ")
+ }
+ }
+ if vf.PkgPath != "" {
+ result.include = false
+ }
+ if len(parse) > 1 && strings.Trim(parse[1], " ") == "omitempty" {
+ result.omitempty = true
+ }
+ if vf.Type.Kind() == reflect.Ptr {
+ result.omitempty = true
+ }
+ return result
+}
+
+func isZero(val reflect.Value) bool {
+ switch val.Type().Kind() {
+ case reflect.Map:
+ fallthrough
+ case reflect.Array:
+ fallthrough
+ case reflect.Slice:
+ return val.Len() == 0
+ default:
+ return reflect.DeepEqual(val.Interface(), reflect.Zero(val.Type()).Interface())
+ }
+}
+
+func formatError(err error, pos Position) error {
+ if err.Error()[0] == '(' { // Error already contains position information
+ return err
+ }
+ return fmt.Errorf("%s: %s", pos, err)
+}
diff --git a/vendor/github.com/pelletier/go-toml/marshal_test.toml b/vendor/github.com/pelletier/go-toml/marshal_test.toml
new file mode 100644
index 0000000..1c5f98e
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/marshal_test.toml
@@ -0,0 +1,38 @@
+title = "TOML Marshal Testing"
+
+[basic]
+ bool = true
+ date = 1979-05-27T07:32:00Z
+ float = 123.4
+ int = 5000
+ string = "Bite me"
+ uint = 5001
+
+[basic_lists]
+ bools = [true,false,true]
+ dates = [1979-05-27T07:32:00Z,1980-05-27T07:32:00Z]
+ floats = [12.3,45.6,78.9]
+ ints = [8001,8001,8002]
+ strings = ["One","Two","Three"]
+ uints = [5002,5003]
+
+[basic_map]
+ one = "one"
+ two = "two"
+
+[subdoc]
+
+ [subdoc.first]
+ name = "First"
+
+ [subdoc.second]
+ name = "Second"
+
+[[subdoclist]]
+ name = "List.First"
+
+[[subdoclist]]
+ name = "List.Second"
+
+[[subdocptrs]]
+ name = "Second"
diff --git a/vendor/github.com/pelletier/go-toml/test.sh b/vendor/github.com/pelletier/go-toml/test.sh
index 15ac1e1..436d2fb 100755
--- a/vendor/github.com/pelletier/go-toml/test.sh
+++ b/vendor/github.com/pelletier/go-toml/test.sh
@@ -19,6 +19,9 @@ function git_clone() {
popd
}
+# Run go vet
+go vet ./...
+
go get github.com/pelletier/go-buffruneio
go get github.com/davecgh/go-spew/spew
@@ -38,8 +41,8 @@ cp -R cmd/* src/github.com/pelletier/go-toml/cmd
go build -o test_program_bin src/github.com/pelletier/go-toml/cmd/test_program.go
# Run basic unit tests
-go test github.com/pelletier/go-toml \
- github.com/pelletier/go-toml/cmd/tomljson
+go test github.com/pelletier/go-toml -v -covermode=count -coverprofile=coverage.out
+go test github.com/pelletier/go-toml/cmd/tomljson
# run the entire BurntSushi test suite
if [[ $# -eq 0 ]] ; then
diff --git a/vendor/github.com/pelletier/go-toml/token.go b/vendor/github.com/pelletier/go-toml/token.go
index e598cf9..5581fe0 100644
--- a/vendor/github.com/pelletier/go-toml/token.go
+++ b/vendor/github.com/pelletier/go-toml/token.go
@@ -135,5 +135,6 @@ func isDigit(r rune) bool {
func isHexDigit(r rune) bool {
return isDigit(r) ||
- r == 'A' || r == 'B' || r == 'C' || r == 'D' || r == 'E' || r == 'F'
+ (r >= 'a' && r <= 'f') ||
+ (r >= 'A' && r <= 'F')
}
diff --git a/vendor/github.com/pelletier/go-toml/toml.go b/vendor/github.com/pelletier/go-toml/toml.go
index ad23fe8..1ba56a1 100644
--- a/vendor/github.com/pelletier/go-toml/toml.go
+++ b/vendor/github.com/pelletier/go-toml/toml.go
@@ -10,13 +10,13 @@ import (
)
type tomlValue struct {
- value interface{}
+ value interface{} // string, int64, uint64, float64, bool, time.Time, [] of any of this list
position Position
}
// TomlTree is the result of the parsing of a TOML file.
type TomlTree struct {
- values map[string]interface{}
+ values map[string]interface{} // string -> *tomlValue, *TomlTree, []*TomlTree
position Position
}
@@ -28,10 +28,12 @@ func newTomlTree() *TomlTree {
}
// TreeFromMap initializes a new TomlTree object using the given map.
-func TreeFromMap(m map[string]interface{}) *TomlTree {
- return &TomlTree{
- values: m,
+func TreeFromMap(m map[string]interface{}) (*TomlTree, error) {
+ result, err := toTree(m)
+ if err != nil {
+ return nil, err
}
+ return result.(*TomlTree), nil
}
// Has returns a boolean indicating if the given key exists.
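A small sketch of the changed signature: TreeFromMap now validates the map and returns an error alongside the tree (keys and values here are illustrative).

    package main

    import (
        "fmt"

        "github.com/pelletier/go-toml"
    )

    func main() {
        tree, err := toml.TreeFromMap(map[string]interface{}{
            "title": "example",
            "owner": map[string]interface{}{"name": "nobody"},
        })
        if err != nil {
            panic(err)
        }
        fmt.Println(tree.Has("owner")) // true
    }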
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_conversions.go b/vendor/github.com/pelletier/go-toml/tomltree_conversions.go
deleted file mode 100644
index fc8f22b..0000000
--- a/vendor/github.com/pelletier/go-toml/tomltree_conversions.go
+++ /dev/null
@@ -1,227 +0,0 @@
-package toml
-
-// Tools to convert a TomlTree to different representations
-
-import (
- "fmt"
- "strconv"
- "strings"
- "time"
-)
-
-// encodes a string to a TOML-compliant string value
-func encodeTomlString(value string) string {
- result := ""
- for _, rr := range value {
- intRr := uint16(rr)
- switch rr {
- case '\b':
- result += "\\b"
- case '\t':
- result += "\\t"
- case '\n':
- result += "\\n"
- case '\f':
- result += "\\f"
- case '\r':
- result += "\\r"
- case '"':
- result += "\\\""
- case '\\':
- result += "\\\\"
- default:
- if intRr < 0x001F {
- result += fmt.Sprintf("\\u%0.4X", intRr)
- } else {
- result += string(rr)
- }
- }
- }
- return result
-}
-
-// Value print support function for ToString()
-// Outputs the TOML compliant string representation of a value
-func toTomlValue(item interface{}, indent int) string {
- tab := strings.Repeat(" ", indent)
- switch value := item.(type) {
- case int:
- return tab + strconv.FormatInt(int64(value), 10)
- case int8:
- return tab + strconv.FormatInt(int64(value), 10)
- case int16:
- return tab + strconv.FormatInt(int64(value), 10)
- case int32:
- return tab + strconv.FormatInt(int64(value), 10)
- case int64:
- return tab + strconv.FormatInt(value, 10)
- case uint:
- return tab + strconv.FormatUint(uint64(value), 10)
- case uint8:
- return tab + strconv.FormatUint(uint64(value), 10)
- case uint16:
- return tab + strconv.FormatUint(uint64(value), 10)
- case uint32:
- return tab + strconv.FormatUint(uint64(value), 10)
- case uint64:
- return tab + strconv.FormatUint(value, 10)
- case float32:
- return tab + strconv.FormatFloat(float64(value), 'f', -1, 32)
- case float64:
- return tab + strconv.FormatFloat(value, 'f', -1, 64)
- case string:
- return tab + "\"" + encodeTomlString(value) + "\""
- case bool:
- if value {
- return "true"
- }
- return "false"
- case time.Time:
- return tab + value.Format(time.RFC3339)
- case []interface{}:
- values := []string{}
- for _, item := range value {
- values = append(values, toTomlValue(item, 0))
- }
- return "[" + strings.Join(values, ",") + "]"
- case nil:
- return ""
- default:
- panic(fmt.Errorf("unsupported value type %T: %v", value, value))
- }
-}
-
-// Recursive support function for ToString()
-// Outputs a tree, using the provided keyspace to prefix table names
-func (t *TomlTree) toToml(indent, keyspace string) string {
- resultChunks := []string{}
- for k, v := range t.values {
- // figure out the keyspace
- combinedKey := k
- if keyspace != "" {
- combinedKey = keyspace + "." + combinedKey
- }
- resultChunk := ""
- // output based on type
- switch node := v.(type) {
- case []*TomlTree:
- for _, item := range node {
- if len(item.Keys()) > 0 {
- resultChunk += fmt.Sprintf("\n%s[[%s]]\n", indent, combinedKey)
- }
- resultChunk += item.toToml(indent+" ", combinedKey)
- }
- resultChunks = append(resultChunks, resultChunk)
- case *TomlTree:
- if len(node.Keys()) > 0 {
- resultChunk += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
- }
- resultChunk += node.toToml(indent+" ", combinedKey)
- resultChunks = append(resultChunks, resultChunk)
- case map[string]interface{}:
- sub := TreeFromMap(node)
-
- if len(sub.Keys()) > 0 {
- resultChunk += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
- }
- resultChunk += sub.toToml(indent+" ", combinedKey)
- resultChunks = append(resultChunks, resultChunk)
- case map[string]string:
- sub := TreeFromMap(convertMapStringString(node))
-
- if len(sub.Keys()) > 0 {
- resultChunk += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
- }
- resultChunk += sub.toToml(indent+" ", combinedKey)
- resultChunks = append(resultChunks, resultChunk)
- case map[interface{}]interface{}:
- sub := TreeFromMap(convertMapInterfaceInterface(node))
-
- if len(sub.Keys()) > 0 {
- resultChunk += fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
- }
- resultChunk += sub.toToml(indent+" ", combinedKey)
- resultChunks = append(resultChunks, resultChunk)
- case *tomlValue:
- resultChunk = fmt.Sprintf("%s%s = %s\n", indent, k, toTomlValue(node.value, 0))
- resultChunks = append([]string{resultChunk}, resultChunks...)
- default:
- resultChunk = fmt.Sprintf("%s%s = %s\n", indent, k, toTomlValue(v, 0))
- resultChunks = append([]string{resultChunk}, resultChunks...)
- }
-
- }
- return strings.Join(resultChunks, "")
-}
-
-// Same as ToToml(), but does not panic and returns an error
-func (t *TomlTree) toTomlSafe(indent, keyspace string) (result string, err error) {
- defer func() {
- if r := recover(); r != nil {
- result = ""
- switch x := r.(type) {
- case error:
- err = x
- default:
- err = fmt.Errorf("unknown panic: %s", r)
- }
- }
- }()
- result = t.toToml(indent, keyspace)
- return
-}
-
-func convertMapStringString(in map[string]string) map[string]interface{} {
- result := make(map[string]interface{}, len(in))
- for k, v := range in {
- result[k] = v
- }
- return result
-}
-
-func convertMapInterfaceInterface(in map[interface{}]interface{}) map[string]interface{} {
- result := make(map[string]interface{}, len(in))
- for k, v := range in {
- result[k.(string)] = v
- }
- return result
-}
-
-// ToString generates a human-readable representation of the current tree.
-// Output spans multiple lines, and is suitable for ingest by a TOML parser.
-// If the conversion cannot be performed, ToString returns a non-nil error.
-func (t *TomlTree) ToString() (string, error) {
- return t.toTomlSafe("", "")
-}
-
-// String generates a human-readable representation of the current tree.
-// Alias of ToString.
-func (t *TomlTree) String() string {
- result, _ := t.ToString()
- return result
-}
-
-// ToMap recursively generates a representation of the current tree using map[string]interface{}.
-func (t *TomlTree) ToMap() map[string]interface{} {
- result := map[string]interface{}{}
-
- for k, v := range t.values {
- switch node := v.(type) {
- case []*TomlTree:
- var array []interface{}
- for _, item := range node {
- array = append(array, item.ToMap())
- }
- result[k] = array
- case *TomlTree:
- result[k] = node.ToMap()
- case map[string]interface{}:
- sub := TreeFromMap(node)
- result[k] = sub.ToMap()
- case *tomlValue:
- result[k] = node.value
- }
- }
-
- return result
-}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_create.go b/vendor/github.com/pelletier/go-toml/tomltree_create.go
new file mode 100644
index 0000000..c6054f3
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_create.go
@@ -0,0 +1,135 @@
+package toml
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+)
+
+// supported values:
+// string, bool, int64, uint64, float64, time.Time, int, int8, int16, int32, uint, uint8, uint16, uint32, float32
+
+var kindToTypeMapping = map[reflect.Kind]reflect.Type{
+ reflect.Bool: reflect.TypeOf(true),
+ reflect.String: reflect.TypeOf(""),
+ reflect.Float32: reflect.TypeOf(float64(1)),
+ reflect.Float64: reflect.TypeOf(float64(1)),
+ reflect.Int: reflect.TypeOf(int64(1)),
+ reflect.Int8: reflect.TypeOf(int64(1)),
+ reflect.Int16: reflect.TypeOf(int64(1)),
+ reflect.Int32: reflect.TypeOf(int64(1)),
+ reflect.Int64: reflect.TypeOf(int64(1)),
+ reflect.Uint: reflect.TypeOf(uint64(1)),
+ reflect.Uint8: reflect.TypeOf(uint64(1)),
+ reflect.Uint16: reflect.TypeOf(uint64(1)),
+ reflect.Uint32: reflect.TypeOf(uint64(1)),
+ reflect.Uint64: reflect.TypeOf(uint64(1)),
+}
+
+func simpleValueCoercion(object interface{}) (interface{}, error) {
+ switch original := object.(type) {
+ case string, bool, int64, uint64, float64, time.Time:
+ return original, nil
+ case int:
+ return int64(original), nil
+ case int8:
+ return int64(original), nil
+ case int16:
+ return int64(original), nil
+ case int32:
+ return int64(original), nil
+ case uint:
+ return uint64(original), nil
+ case uint8:
+ return uint64(original), nil
+ case uint16:
+ return uint64(original), nil
+ case uint32:
+ return uint64(original), nil
+ case float32:
+ return float64(original), nil
+ case fmt.Stringer:
+ return original.String(), nil
+ default:
+ return nil, fmt.Errorf("cannot convert type %T to TomlTree", object)
+ }
+}
+
+func sliceToTree(object interface{}) (interface{}, error) {
+ // arrays are a bit tricky, since they can represent either a
+ // collection of simple values, which is represented by one
+ // *tomlValue, or an array of tables, which is represented by an
+ // array of *TomlTree.
+
+ // holding the assumption that this function is called from toTree only when value.Kind() is Array or Slice
+ value := reflect.ValueOf(object)
+ insideType := value.Type().Elem()
+ length := value.Len()
+ if length > 0 {
+ insideType = reflect.ValueOf(value.Index(0).Interface()).Type()
+ }
+ if insideType.Kind() == reflect.Map {
+ // this is considered as an array of tables
+ tablesArray := make([]*TomlTree, 0, length)
+ for i := 0; i < length; i++ {
+ table := value.Index(i)
+ tree, err := toTree(table.Interface())
+ if err != nil {
+ return nil, err
+ }
+ tablesArray = append(tablesArray, tree.(*TomlTree))
+ }
+ return tablesArray, nil
+ }
+
+ sliceType := kindToTypeMapping[insideType.Kind()]
+ if sliceType == nil {
+ sliceType = insideType
+ }
+
+ arrayValue := reflect.MakeSlice(reflect.SliceOf(sliceType), 0, length)
+
+ for i := 0; i < length; i++ {
+ val := value.Index(i).Interface()
+ simpleValue, err := simpleValueCoercion(val)
+ if err != nil {
+ return nil, err
+ }
+ arrayValue = reflect.Append(arrayValue, reflect.ValueOf(simpleValue))
+ }
+ return &tomlValue{arrayValue.Interface(), Position{}}, nil
+}
+
+func toTree(object interface{}) (interface{}, error) {
+ value := reflect.ValueOf(object)
+
+ if value.Kind() == reflect.Map {
+ values := map[string]interface{}{}
+ keys := value.MapKeys()
+ for _, key := range keys {
+ if key.Kind() != reflect.String {
+ if _, ok := key.Interface().(string); !ok {
+ return nil, fmt.Errorf("map key needs to be a string, not %T (%v)", key.Interface(), key.Kind())
+ }
+ }
+
+ v := value.MapIndex(key)
+ newValue, err := toTree(v.Interface())
+ if err != nil {
+ return nil, err
+ }
+ values[key.String()] = newValue
+ }
+ return &TomlTree{values, Position{}}, nil
+ }
+
+ if value.Kind() == reflect.Array || value.Kind() == reflect.Slice {
+ return sliceToTree(object)
+ }
+
+ simpleValue, err := simpleValueCoercion(object)
+ if err != nil {
+ return nil, err
+ }
+ return &tomlValue{simpleValue, Position{}}, nil
+}
diff --git a/vendor/github.com/pelletier/go-toml/tomltree_write.go b/vendor/github.com/pelletier/go-toml/tomltree_write.go
new file mode 100644
index 0000000..6a7fa17
--- /dev/null
+++ b/vendor/github.com/pelletier/go-toml/tomltree_write.go
@@ -0,0 +1,217 @@
+package toml
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+ "reflect"
+)
+
+// encodes a string to a TOML-compliant string value
+func encodeTomlString(value string) string {
+ result := ""
+ for _, rr := range value {
+ switch rr {
+ case '\b':
+ result += "\\b"
+ case '\t':
+ result += "\\t"
+ case '\n':
+ result += "\\n"
+ case '\f':
+ result += "\\f"
+ case '\r':
+ result += "\\r"
+ case '"':
+ result += "\\\""
+ case '\\':
+ result += "\\\\"
+ default:
+ intRr := uint16(rr)
+ if intRr < 0x001F {
+ result += fmt.Sprintf("\\u%0.4X", intRr)
+ } else {
+ result += string(rr)
+ }
+ }
+ }
+ return result
+}
+
+func tomlValueStringRepresentation(v interface{}) (string, error) {
+ switch value := v.(type) {
+ case uint64:
+ return strconv.FormatUint(value, 10), nil
+ case int64:
+ return strconv.FormatInt(value, 10), nil
+ case float64:
+ return strconv.FormatFloat(value, 'f', -1, 32), nil
+ case string:
+ return "\"" + encodeTomlString(value) + "\"", nil
+ case []byte:
+ b, _ := v.([]byte)
+ return tomlValueStringRepresentation(string(b))
+ case bool:
+ if value {
+ return "true", nil
+ }
+ return "false", nil
+ case time.Time:
+ return value.Format(time.RFC3339), nil
+ case nil:
+ return "", nil
+ }
+
+ rv := reflect.ValueOf(v)
+
+ if rv.Kind() == reflect.Slice {
+ values := []string{}
+ for i := 0; i < rv.Len(); i++ {
+ item := rv.Index(i).Interface()
+ itemRepr, err := tomlValueStringRepresentation(item)
+ if err != nil {
+ return "", err
+ }
+ values = append(values, itemRepr)
+ }
+ return "[" + strings.Join(values, ",") + "]", nil
+ }
+ return "", fmt.Errorf("unsupported value type %T: %v", v, v)
+}
+
+func (t *TomlTree) writeTo(w io.Writer, indent, keyspace string, bytesCount int64) (int64, error) {
+ simpleValuesKeys := make([]string, 0)
+ complexValuesKeys := make([]string, 0)
+
+ for k := range t.values {
+ v := t.values[k]
+ switch v.(type) {
+ case *TomlTree, []*TomlTree:
+ complexValuesKeys = append(complexValuesKeys, k)
+ default:
+ simpleValuesKeys = append(simpleValuesKeys, k)
+ }
+ }
+
+ sort.Strings(simpleValuesKeys)
+ sort.Strings(complexValuesKeys)
+
+ for _, k := range simpleValuesKeys {
+ v, ok := t.values[k].(*tomlValue)
+ if !ok {
+ return bytesCount, fmt.Errorf("invalid value type at %s: %T", k, t.values[k])
+ }
+
+ repr, err := tomlValueStringRepresentation(v.value)
+ if err != nil {
+ return bytesCount, err
+ }
+
+ kvRepr := fmt.Sprintf("%s%s = %s\n", indent, k, repr)
+ writtenBytesCount, err := w.Write([]byte(kvRepr))
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+ }
+
+ for _, k := range complexValuesKeys {
+ v := t.values[k]
+
+ combinedKey := k
+ if keyspace != "" {
+ combinedKey = keyspace + "." + combinedKey
+ }
+
+ switch node := v.(type) {
+ // node has to be of those two types given how keys are sorted above
+ case *TomlTree:
+ tableName := fmt.Sprintf("\n%s[%s]\n", indent, combinedKey)
+ writtenBytesCount, err := w.Write([]byte(tableName))
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+ bytesCount, err = node.writeTo(w, indent+" ", combinedKey, bytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+ case []*TomlTree:
+ for _, subTree := range node {
+ if len(subTree.values) > 0 {
+ tableArrayName := fmt.Sprintf("\n%s[[%s]]\n", indent, combinedKey)
+ writtenBytesCount, err := w.Write([]byte(tableArrayName))
+ bytesCount += int64(writtenBytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+
+ bytesCount, err = subTree.writeTo(w, indent+" ", combinedKey, bytesCount)
+ if err != nil {
+ return bytesCount, err
+ }
+ }
+ }
+ }
+ }
+
+ return bytesCount, nil
+}
+
+// WriteTo encodes the TomlTree as TOML and writes it to the writer w.
+// Returns the number of bytes written in case of success, or an error if anything happened.
+func (t *TomlTree) WriteTo(w io.Writer) (int64, error) {
+ return t.writeTo(w, "", "", 0)
+}
+
+// ToTomlString generates a human-readable representation of the current tree.
+// Output spans multiple lines, and is suitable for ingest by a TOML parser.
+// If the conversion cannot be performed, ToTomlString returns a non-nil error.
+func (t *TomlTree) ToTomlString() (string, error) {
+ var buf bytes.Buffer
+ _, err := t.WriteTo(&buf)
+ if err != nil {
+ return "", err
+ }
+ return buf.String(), nil
+}
+
+// String generates a human-readable representation of the current tree.
+// Alias of ToTomlString. Present to implement the fmt.Stringer interface.
+func (t *TomlTree) String() string {
+ result, _ := t.ToTomlString()
+ return result
+}
+
+// ToMap recursively generates a representation of the tree using Go built-in structures.
+// The following types are used:
+// * uint64
+// * int64
+// * bool
+// * string
+// * time.Time
+// * map[string]interface{} (where interface{} is any of this list)
+// * []interface{} (where interface{} is any of this list)
+func (t *TomlTree) ToMap() map[string]interface{} {
+ result := map[string]interface{}{}
+
+ for k, v := range t.values {
+ switch node := v.(type) {
+ case []*TomlTree:
+ var array []interface{}
+ for _, item := range node {
+ array = append(array, item.ToMap())
+ }
+ result[k] = array
+ case *TomlTree:
+ result[k] = node.ToMap()
+ case *tomlValue:
+ result[k] = node.value
+ }
+ }
+ return result
+}
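A quick round-trip sketch, assuming the existing Load entry point is unchanged: parse a document into a TomlTree, then serialize it back either as a string or through an io.Writer.

    package main

    import (
        "bytes"
        "fmt"

        "github.com/pelletier/go-toml"
    )

    func main() {
        tree, err := toml.Load("[server]\nname = \"web\"\nport = 8080\n")
        if err != nil {
            panic(err)
        }
        s, err := tree.ToTomlString()
        if err != nil {
            panic(err)
        }
        fmt.Print(s)

        var buf bytes.Buffer
        if _, err := tree.WriteTo(&buf); err != nil { // same output, streamed
            panic(err)
        }
    }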
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
index f60710f..1fdef9e 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/doc.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go
@@ -69,7 +69,7 @@
// Metrics
//
// The number of exported identifiers in this package might appear a bit
-// overwhelming. Hovever, in addition to the basic plumbing shown in the example
+// overwhelming. However, in addition to the basic plumbing shown in the example
// above, you only need to understand the different metric types and their
// vector versions for basic usage.
//
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
index 1ae9b8f..f967645 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go
@@ -217,6 +217,14 @@ func NewGoCollector() Collector {
),
eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
valType: GaugeValue,
+ }, {
+ desc: NewDesc(
+ memstatNamespace("gc_cpu_fraction"),
+ "The fraction of this program's available CPU time used by the GC since the program started.",
+ nil, nil,
+ ),
+ eval: func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
+ valType: GaugeValue,
},
},
}
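
A hedged sketch of how the extended Go collector might be wired up so the new go_memstats_gc_cpu_fraction gauge becomes visible; the listen address is a placeholder:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// NewGoCollector now also exports go_memstats_gc_cpu_fraction.
	reg.MustRegister(prometheus.NewGoCollector())

	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":9090", nil))
}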
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
index 78d5f19..8c6b5bd 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go
@@ -243,7 +243,7 @@ func (r *Registry) Register(c Collector) error {
}()
r.mtx.Lock()
defer r.mtx.Unlock()
- // Coduct various tests...
+ // Conduct various tests...
for desc := range descChan {
// Is the descriptor valid at all?
diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go
index 7d3e810..ff75ce5 100644
--- a/vendor/github.com/prometheus/client_golang/prometheus/value.go
+++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go
@@ -44,7 +44,7 @@ var errInconsistentCardinality = errors.New("inconsistent label cardinality")
// ValueType. This is a low-level building block used by the library to back the
// implementations of Counter, Gauge, and Untyped.
type value struct {
- // valBits containst the bits of the represented float64 value. It has
+ // valBits contains the bits of the represented float64 value. It has
// to go first in the struct to guarantee alignment for atomic
// operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG
valBits uint64
diff --git a/vendor/github.com/sethgrid/pester/main.go b/vendor/github.com/sethgrid/pester/main.go
index 2771a23..4a69791 100644
--- a/vendor/github.com/sethgrid/pester/main.go
+++ b/vendor/github.com/sethgrid/pester/main.go
@@ -1,7 +1,6 @@
-package pester
-
-// pester provides additional resiliency over the standard http client methods by
+// Package pester provides additional resiliency over the standard http client methods by
// allowing you to control concurrency, retries, and a backoff strategy.
+package pester
import (
"bytes"
@@ -9,7 +8,6 @@ import (
"fmt"
"io"
"io/ioutil"
- "math"
"math/rand"
"net/http"
"net/url"
@@ -17,6 +15,15 @@ import (
"time"
)
+// ErrUnexpectedMethod occurs when an http.Client method cannot be mapped from a calling method in the pester client
+var ErrUnexpectedMethod = errors.New("unexpected client method, must be one of Do, Get, Head, Post, or PostForm")
+
+// ErrReadingBody happens when we cannot read the body bytes
+var ErrReadingBody = errors.New("error reading body")
+
+// ErrReadingRequestBody happens when we cannot read the request body bytes
+var ErrReadingRequestBody = errors.New("error reading request body")
+
// Client wraps the http client and exposes all the functionality of the http.Client.
// Additionally, Client provides pester specific values for handling resiliency.
type Client struct {
@@ -33,6 +40,7 @@ type Client struct {
MaxRetries int
Backoff BackoffStrategy
KeepLog bool
+ LogHook LogHook
SuccessReqNum int
SuccessRetryNum int
@@ -76,6 +84,12 @@ type params struct {
data url.Values
}
+var random *rand.Rand
+
+func init() {
+ random = rand.New(rand.NewSource(time.Now().UnixNano()))
+}
+
// New constructs a new DefaultClient with sensible default values
func New() *Client {
return &Client{
@@ -95,6 +109,10 @@ func NewExtendedClient(hc *http.Client) *Client {
return c
}
+// LogHook is used to log attempts as they happen.
+// It gives the consumer immediate visibility into each failed attempt.
+type LogHook func(e ErrEntry)
+
// BackoffStrategy is used to determine how long a retry request should wait until attempted
type BackoffStrategy func(retry int) time.Duration
@@ -108,13 +126,13 @@ func DefaultBackoff(_ int) time.Duration {
// ExponentialBackoff returns ever increasing backoffs by a power of 2
func ExponentialBackoff(i int) time.Duration {
- return time.Duration(math.Pow(2, float64(i))) * time.Second
+ return time.Duration(1<<uint(i)) * time.Second
}
// ExponentialJitterBackoff returns ever increasing backoffs by a power of 2
 // with +/- 0-33% to prevent synchronized requests.
func ExponentialJitterBackoff(i int) time.Duration {
- return jitter(int(math.Pow(2, float64(i))))
+ return jitter(int(1 << uint(i)))
}
// LinearBackoff returns increasing durations, each a second longer than the last
@@ -134,14 +152,8 @@ func jitter(i int) time.Duration {
maxJitter := ms / 3
- rand.Seed(time.Now().Unix())
- jitter := rand.Intn(maxJitter + 1)
-
- if rand.Intn(2) == 1 {
- ms = ms + jitter
- } else {
- ms = ms - jitter
- }
+ // ms ± rand
+ ms += random.Intn(2*maxJitter) - maxJitter
// a jitter of 0 messes up the time.Tick chan
if ms <= 0 {
@@ -206,14 +218,14 @@ func (c *Client) pester(p params) (*http.Response, error) {
if p.req != nil && p.req.Body != nil {
originalRequestBody, err = ioutil.ReadAll(p.req.Body)
if err != nil {
- return &http.Response{}, errors.New("error reading request body")
+ return nil, ErrReadingRequestBody
}
p.req.Body.Close()
}
if p.body != nil {
originalBody, err = ioutil.ReadAll(p.body)
if err != nil {
- return &http.Response{}, errors.New("error reading body")
+ return nil, ErrReadingBody
}
}
@@ -238,7 +250,6 @@ func (c *Client) pester(p params) (*http.Response, error) {
return
default:
}
- resp := &http.Response{}
// rehydrate the body (it is drained each read)
if len(originalRequestBody) > 0 {
@@ -248,6 +259,7 @@ func (c *Client) pester(p params) (*http.Response, error) {
p.body = bytes.NewBuffer(originalBody)
}
+ var resp *http.Response
// route the calls
switch p.method {
case "Do":
@@ -260,6 +272,8 @@ func (c *Client) pester(p params) (*http.Response, error) {
resp, err = httpClient.Post(p.url, p.bodyType, p.body)
case "PostForm":
resp, err = httpClient.PostForm(p.url, p.data)
+ default:
+ err = ErrUnexpectedMethod
}
// Early return if we have a valid result
@@ -320,14 +334,13 @@ func (c *Client) pester(p params) (*http.Response, error) {
}
}()
- select {
- case res := <-resultCh:
- c.Lock()
- defer c.Unlock()
- c.SuccessReqNum = res.req
- c.SuccessRetryNum = res.retry
- return res.resp, res.err
- }
+ res := <-resultCh
+ c.Lock()
+ defer c.Unlock()
+ c.SuccessReqNum = res.req
+ c.SuccessRetryNum = res.retry
+ return res.resp, res.err
+
}
// LogString provides a string representation of the errors the client has seen
@@ -336,12 +349,17 @@ func (c *Client) LogString() string {
defer c.Unlock()
var res string
for _, e := range c.ErrLog {
- res += fmt.Sprintf("%d %s [%s] %s request-%d retry-%d error: %s\n",
- e.Time.Unix(), e.Method, e.Verb, e.URL, e.Request, e.Retry, e.Err)
+ res += c.FormatError(e)
}
return res
}
+// FormatError formats an ErrEntry into a human-readable string
+func (c *Client) FormatError(e ErrEntry) string {
+ return fmt.Sprintf("%d %s [%s] %s request-%d retry-%d error: %s\n",
+ e.Time.Unix(), e.Method, e.Verb, e.URL, e.Request, e.Retry, e.Err)
+}
+
// LogErrCount is a helper method used primarily for test validation
func (c *Client) LogErrCount() int {
c.Lock()
@@ -358,8 +376,12 @@ func (c *Client) EmbedHTTPClient(hc *http.Client) {
func (c *Client) log(e ErrEntry) {
if c.KeepLog {
c.Lock()
+ defer c.Unlock()
c.ErrLog = append(c.ErrLog, e)
- c.Unlock()
+ } else if c.LogHook != nil {
+ // NOTE: the LogHook may slow logging down,
+ // but the consumer can always do the work in a goroutine.
+ c.LogHook(e)
}
}
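
A rough usage sketch of the reworked pester client, combining the shared random source, the bit-shift exponential backoff and the new LogHook; the URL and retry numbers are illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/sethgrid/pester"
)

func main() {
	client := pester.New()
	client.Concurrency = 1
	client.MaxRetries = 5
	client.Backoff = pester.ExponentialJitterBackoff

	// With KeepLog disabled (the default), the new LogHook is called for
	// every failed attempt instead of buffering entries in ErrLog.
	client.LogHook = func(e pester.ErrEntry) {
		log.Print(client.FormatError(e))
	}

	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}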
diff --git a/vendor/github.com/spf13/cast/Makefile b/vendor/github.com/spf13/cast/Makefile
new file mode 100644
index 0000000..7ccf893
--- /dev/null
+++ b/vendor/github.com/spf13/cast/Makefile
@@ -0,0 +1,38 @@
+# A Self-Documenting Makefile: http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html
+
+.PHONY: check fmt lint test test-race vet test-cover-html help
+.DEFAULT_GOAL := help
+
+check: test-race fmt vet lint ## Run tests and linters
+
+test: ## Run tests
+ go test ./...
+
+test-race: ## Run tests with race detector
+ go test -race ./...
+
+fmt: ## Run gofmt linter
+ @for d in `go list` ; do \
+ if [ "`gofmt -l -s $$GOPATH/src/$$d | tee /dev/stderr`" ]; then \
+ echo "^ improperly formatted go files" && echo && exit 1; \
+ fi \
+ done
+
+lint: ## Run golint linter
+ @for d in `go list` ; do \
+ if [ "`golint $$d | tee /dev/stderr`" ]; then \
+ echo "^ golint errors!" && echo && exit 1; \
+ fi \
+ done
+
+vet: ## Run go vet linter
+ @if [ "`go vet | tee /dev/stderr`" ]; then \
+ echo "^ go vet errors!" && echo && exit 1; \
+ fi
+
+test-cover-html: ## Generate test coverage report
+ go test -coverprofile=coverage.out -covermode=count
+ go tool cover -func=coverage.out
+
+help:
+ @grep -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
diff --git a/vendor/github.com/spf13/cast/README.md b/vendor/github.com/spf13/cast/README.md
index af7a1fd..e693939 100644
--- a/vendor/github.com/spf13/cast/README.md
+++ b/vendor/github.com/spf13/cast/README.md
@@ -1,5 +1,8 @@
cast
====
+[![GoDoc](https://godoc.org/github.com/spf13/cast?status.svg)](https://godoc.org/github.com/spf13/cast)
+[![Build Status](https://api.travis-ci.org/spf13/cast.svg?branch=master)](https://travis-ci.org/spf13/cast)
+[![Go Report Card](https://goreportcard.com/badge/github.com/spf13/cast)](https://goreportcard.com/report/github.com/spf13/cast)
Easy and safe casting from one type to another in Go
diff --git a/vendor/github.com/spf13/cast/cast.go b/vendor/github.com/spf13/cast/cast.go
index 6ca3e0e..dc504b4 100644
--- a/vendor/github.com/spf13/cast/cast.go
+++ b/vendor/github.com/spf13/cast/cast.go
@@ -3,80 +3,150 @@
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
+// Package cast provides easy and safe casting in Go.
package cast
import "time"
+// ToBool casts an interface to a bool type.
func ToBool(i interface{}) bool {
v, _ := ToBoolE(i)
return v
}
+// ToTime casts an interface to a time.Time type.
func ToTime(i interface{}) time.Time {
v, _ := ToTimeE(i)
return v
}
+// ToDuration casts an interface to a time.Duration type.
func ToDuration(i interface{}) time.Duration {
v, _ := ToDurationE(i)
return v
}
+// ToFloat64 casts an interface to a float64 type.
func ToFloat64(i interface{}) float64 {
v, _ := ToFloat64E(i)
return v
}
+// ToFloat32 casts an interface to a float32 type.
+func ToFloat32(i interface{}) float32 {
+ v, _ := ToFloat32E(i)
+ return v
+}
+
+// ToInt64 casts an interface to an int64 type.
func ToInt64(i interface{}) int64 {
v, _ := ToInt64E(i)
return v
}
+// ToInt32 casts an interface to an int32 type.
+func ToInt32(i interface{}) int32 {
+ v, _ := ToInt32E(i)
+ return v
+}
+
+// ToInt16 casts an interface to an int16 type.
+func ToInt16(i interface{}) int16 {
+ v, _ := ToInt16E(i)
+ return v
+}
+
+// ToInt8 casts an interface to an int8 type.
+func ToInt8(i interface{}) int8 {
+ v, _ := ToInt8E(i)
+ return v
+}
+
+// ToInt casts an interface to an int type.
func ToInt(i interface{}) int {
v, _ := ToIntE(i)
return v
}
+// ToUint casts an interface to a uint type.
+func ToUint(i interface{}) uint {
+ v, _ := ToUintE(i)
+ return v
+}
+
+// ToUint64 casts an interface to a uint64 type.
+func ToUint64(i interface{}) uint64 {
+ v, _ := ToUint64E(i)
+ return v
+}
+
+// ToUint32 casts an interface to a uint32 type.
+func ToUint32(i interface{}) uint32 {
+ v, _ := ToUint32E(i)
+ return v
+}
+
+// ToUint16 casts an interface to a uint16 type.
+func ToUint16(i interface{}) uint16 {
+ v, _ := ToUint16E(i)
+ return v
+}
+
+// ToUint8 casts an interface to a uint8 type.
+func ToUint8(i interface{}) uint8 {
+ v, _ := ToUint8E(i)
+ return v
+}
+
+// ToString casts an interface to a string type.
func ToString(i interface{}) string {
v, _ := ToStringE(i)
return v
}
+// ToStringMapString casts an interface to a map[string]string type.
func ToStringMapString(i interface{}) map[string]string {
v, _ := ToStringMapStringE(i)
return v
}
+// ToStringMapStringSlice casts an interface to a map[string][]string type.
func ToStringMapStringSlice(i interface{}) map[string][]string {
v, _ := ToStringMapStringSliceE(i)
return v
}
+// ToStringMapBool casts an interface to a map[string]bool type.
func ToStringMapBool(i interface{}) map[string]bool {
v, _ := ToStringMapBoolE(i)
return v
}
+// ToStringMap casts an interface to a map[string]interface{} type.
func ToStringMap(i interface{}) map[string]interface{} {
v, _ := ToStringMapE(i)
return v
}
+// ToSlice casts an interface to a []interface{} type.
func ToSlice(i interface{}) []interface{} {
v, _ := ToSliceE(i)
return v
}
+// ToBoolSlice casts an interface to a []bool type.
func ToBoolSlice(i interface{}) []bool {
v, _ := ToBoolSliceE(i)
return v
}
+// ToStringSlice casts an interface to a []string type.
func ToStringSlice(i interface{}) []string {
v, _ := ToStringSliceE(i)
return v
}
+// ToIntSlice casts an interface to a []int type.
func ToIntSlice(i interface{}) []int {
v, _ := ToIntSliceE(i)
return v
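
The zero-value helpers above all delegate to their *E counterparts; a small illustrative sketch (values chosen only to show the behavior):

package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// The plain To* helpers swallow errors and return the zero value on failure.
	fmt.Println(cast.ToInt("42"))       // 42
	fmt.Println(cast.ToFloat32("3.14")) // 3.14, using the new ToFloat32 helper
	fmt.Println(cast.ToUint64(-1))      // 0, negative values cannot become uints
	fmt.Println(cast.ToBool(1))         // true
	fmt.Println(cast.ToStringSlice([]interface{}{"a", "b"})) // [a b]
}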
diff --git a/vendor/github.com/spf13/cast/caste.go b/vendor/github.com/spf13/cast/caste.go
index 10acc44..4e75f64 100644
--- a/vendor/github.com/spf13/cast/caste.go
+++ b/vendor/github.com/spf13/cast/caste.go
@@ -6,6 +6,7 @@
package cast
import (
+ "errors"
"fmt"
"html/template"
"reflect"
@@ -14,7 +15,9 @@ import (
"time"
)
-// ToTimeE casts an empty interface to time.Time.
+var errNegativeNotAllowed = errors.New("unable to cast negative value")
+
+// ToTimeE casts an interface to a time.Time type.
func ToTimeE(i interface{}) (tim time.Time, err error) {
i = indirect(i)
@@ -22,30 +25,32 @@ func ToTimeE(i interface{}) (tim time.Time, err error) {
case time.Time:
return v, nil
case string:
- d, e := StringToDate(v)
- if e == nil {
- return d, nil
- }
- return time.Time{}, fmt.Errorf("Could not parse Date/Time format: %v\n", e)
+ return StringToDate(v)
case int:
return time.Unix(int64(v), 0), nil
- case int32:
- return time.Unix(int64(v), 0), nil
case int64:
return time.Unix(v, 0), nil
+ case int32:
+ return time.Unix(int64(v), 0), nil
+ case uint:
+ return time.Unix(int64(v), 0), nil
+ case uint64:
+ return time.Unix(int64(v), 0), nil
+ case uint32:
+ return time.Unix(int64(v), 0), nil
default:
- return time.Time{}, fmt.Errorf("Unable to Cast %#v to Time\n", i)
+ return time.Time{}, fmt.Errorf("unable to cast %#v of type %T to Time", i, i)
}
}
-// ToDurationE casts an empty interface to time.Duration.
+// ToDurationE casts an interface to a time.Duration type.
func ToDurationE(i interface{}) (d time.Duration, err error) {
i = indirect(i)
switch s := i.(type) {
case time.Duration:
return s, nil
- case int64, int32, int16, int8, int:
+ case int, int64, int32, int16, int8, uint, uint64, uint32, uint16, uint8:
d = time.Duration(ToInt64(s))
return
case float32, float64:
@@ -59,14 +64,13 @@ func ToDurationE(i interface{}) (d time.Duration, err error) {
}
return
default:
- err = fmt.Errorf("Unable to Cast %#v to Duration\n", i)
+ err = fmt.Errorf("unable to cast %#v of type %T to Duration", i, i)
return
}
}
-// ToBoolE casts an empty interface to a bool.
+// ToBoolE casts an interface to a bool type.
func ToBoolE(i interface{}) (bool, error) {
-
i = indirect(i)
switch b := i.(type) {
@@ -82,11 +86,11 @@ func ToBoolE(i interface{}) (bool, error) {
case string:
return strconv.ParseBool(i.(string))
default:
- return false, fmt.Errorf("Unable to Cast %#v to bool", i)
+ return false, fmt.Errorf("unable to cast %#v of type %T to bool", i, i)
}
}
-// ToFloat64E casts an empty interface to a float64.
+// ToFloat64E casts an interface to a float64 type.
func ToFloat64E(i interface{}) (float64, error) {
i = indirect(i)
@@ -95,6 +99,8 @@ func ToFloat64E(i interface{}) (float64, error) {
return s, nil
case float32:
return float64(s), nil
+ case int:
+ return float64(s), nil
case int64:
return float64(s), nil
case int32:
@@ -103,55 +109,266 @@ func ToFloat64E(i interface{}) (float64, error) {
return float64(s), nil
case int8:
return float64(s), nil
- case int:
+ case uint:
+ return float64(s), nil
+ case uint64:
+ return float64(s), nil
+ case uint32:
+ return float64(s), nil
+ case uint16:
+ return float64(s), nil
+ case uint8:
return float64(s), nil
case string:
v, err := strconv.ParseFloat(s, 64)
if err == nil {
- return float64(v), nil
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
+ case bool:
+ if s {
+ return 1, nil
}
- return 0.0, fmt.Errorf("Unable to Cast %#v to float", i)
+ return 0, nil
default:
- return 0.0, fmt.Errorf("Unable to Cast %#v to float", i)
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float64", i, i)
}
}
-// ToInt64E casts an empty interface to an int64.
-func ToInt64E(i interface{}) (int64, error) {
+// ToFloat32E casts an interface to a float32 type.
+func ToFloat32E(i interface{}) (float32, error) {
i = indirect(i)
switch s := i.(type) {
- case int64:
+ case float64:
+ return float32(s), nil
+ case float32:
return s, nil
case int:
+ return float32(s), nil
+ case int64:
+ return float32(s), nil
+ case int32:
+ return float32(s), nil
+ case int16:
+ return float32(s), nil
+ case int8:
+ return float32(s), nil
+ case uint:
+ return float32(s), nil
+ case uint64:
+ return float32(s), nil
+ case uint32:
+ return float32(s), nil
+ case uint16:
+ return float32(s), nil
+ case uint8:
+ return float32(s), nil
+ case string:
+ v, err := strconv.ParseFloat(s, 32)
+ if err == nil {
+ return float32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to float32", i, i)
+ }
+}
+
+// ToInt64E casts an interface to an int64 type.
+func ToInt64E(i interface{}) (int64, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
return int64(s), nil
+ case int64:
+ return s, nil
case int32:
return int64(s), nil
case int16:
return int64(s), nil
case int8:
return int64(s), nil
+ case uint:
+ return int64(s), nil
+ case uint64:
+ return int64(s), nil
+ case uint32:
+ return int64(s), nil
+ case uint16:
+ return int64(s), nil
+ case uint8:
+ return int64(s), nil
+ case float64:
+ return int64(s), nil
+ case float32:
+ return int64(s), nil
case string:
v, err := strconv.ParseInt(s, 0, 0)
if err == nil {
return v, nil
}
- return 0, fmt.Errorf("Unable to Cast %#v to int64", i)
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int64", i, i)
+ }
+}
+
+// ToInt32E casts an interface to an int32 type.
+func ToInt32E(i interface{}) (int32, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int32(s), nil
+ case int64:
+ return int32(s), nil
+ case int32:
+ return s, nil
+ case int16:
+ return int32(s), nil
+ case int8:
+ return int32(s), nil
+ case uint:
+ return int32(s), nil
+ case uint64:
+ return int32(s), nil
+ case uint32:
+ return int32(s), nil
+ case uint16:
+ return int32(s), nil
+ case uint8:
+ return int32(s), nil
case float64:
- return int64(s), nil
+ return int32(s), nil
+ case float32:
+ return int32(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int32", i, i)
+ }
+}
+
+// ToInt16E casts an interface to an int16 type.
+func ToInt16E(i interface{}) (int16, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int16(s), nil
+ case int64:
+ return int16(s), nil
+ case int32:
+ return int16(s), nil
+ case int16:
+ return s, nil
+ case int8:
+ return int16(s), nil
+ case uint:
+ return int16(s), nil
+ case uint64:
+ return int16(s), nil
+ case uint32:
+ return int16(s), nil
+ case uint16:
+ return int16(s), nil
+ case uint8:
+ return int16(s), nil
+ case float64:
+ return int16(s), nil
+ case float32:
+ return int16(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int16(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
case bool:
- if bool(s) {
- return int64(1), nil
+ if s {
+ return 1, nil
}
- return int64(0), nil
+ return 0, nil
case nil:
- return int64(0), nil
+ return 0, nil
default:
- return int64(0), fmt.Errorf("Unable to Cast %#v to int64", i)
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int16", i, i)
}
}
-// ToIntE casts an empty interface to an int.
+// ToInt8E casts an interface to an int8 type.
+func ToInt8E(i interface{}) (int8, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case int:
+ return int8(s), nil
+ case int64:
+ return int8(s), nil
+ case int32:
+ return int8(s), nil
+ case int16:
+ return int8(s), nil
+ case int8:
+ return s, nil
+ case uint:
+ return int8(s), nil
+ case uint64:
+ return int8(s), nil
+ case uint32:
+ return int8(s), nil
+ case uint16:
+ return int8(s), nil
+ case uint8:
+ return int8(s), nil
+ case float64:
+ return int8(s), nil
+ case float32:
+ return int8(s), nil
+ case string:
+ v, err := strconv.ParseInt(s, 0, 0)
+ if err == nil {
+ return int8(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int8", i, i)
+ }
+}
+
+// ToIntE casts an interface to an int type.
func ToIntE(i interface{}) (int, error) {
i = indirect(i)
@@ -166,23 +383,375 @@ func ToIntE(i interface{}) (int, error) {
return int(s), nil
case int8:
return int(s), nil
+ case uint:
+ return int(s), nil
+ case uint64:
+ return int(s), nil
+ case uint32:
+ return int(s), nil
+ case uint16:
+ return int(s), nil
+ case uint8:
+ return int(s), nil
+ case float64:
+ return int(s), nil
+ case float32:
+ return int(s), nil
case string:
v, err := strconv.ParseInt(s, 0, 0)
if err == nil {
return int(v), nil
}
- return 0, fmt.Errorf("Unable to Cast %#v to int", i)
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to int", i, i)
+ }
+}
+
+// ToUintE casts an interface to a uint type.
+func ToUintE(i interface{}) (uint, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 0)
+ if err == nil {
+ return uint(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case uint:
+ return s, nil
+ case uint64:
+ return uint(s), nil
+ case uint32:
+ return uint(s), nil
+ case uint16:
+ return uint(s), nil
+ case uint8:
+ return uint(s), nil
case float64:
- return int(s), nil
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint", i, i)
+ }
+}
+
+// ToUint64E casts an interface to a uint64 type.
+func ToUint64E(i interface{}) (uint64, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 64)
+ if err == nil {
+ return v, nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint64: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case uint:
+ return uint64(s), nil
+ case uint64:
+ return s, nil
+ case uint32:
+ return uint64(s), nil
+ case uint16:
+ return uint64(s), nil
+ case uint8:
+ return uint64(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint64(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint64", i, i)
+ }
+}
+
+// ToUint32E casts an interface to a uint32 type.
+func ToUint32E(i interface{}) (uint32, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 32)
+ if err == nil {
+ return uint32(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint32: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case uint:
+ return uint32(s), nil
+ case uint64:
+ return uint32(s), nil
+ case uint32:
+ return s, nil
+ case uint16:
+ return uint32(s), nil
+ case uint8:
+ return uint32(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint32(s), nil
case bool:
- if bool(s) {
+ if s {
return 1, nil
}
return 0, nil
case nil:
return 0, nil
default:
- return 0, fmt.Errorf("Unable to Cast %#v to int", i)
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint32", i, i)
+ }
+}
+
+// ToUint16E casts an interface to a uint16 type.
+func ToUint16E(i interface{}) (uint16, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 16)
+ if err == nil {
+ return uint16(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint16: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case uint:
+ return uint16(s), nil
+ case uint64:
+ return uint16(s), nil
+ case uint32:
+ return uint16(s), nil
+ case uint16:
+ return s, nil
+ case uint8:
+ return uint16(s), nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint16(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint16", i, i)
+ }
+}
+
+// ToUint8E casts an interface to a uint8 type.
+func ToUint8E(i interface{}) (uint8, error) {
+ i = indirect(i)
+
+ switch s := i.(type) {
+ case string:
+ v, err := strconv.ParseUint(s, 0, 8)
+ if err == nil {
+ return uint8(v), nil
+ }
+ return 0, fmt.Errorf("unable to cast %#v to uint8: %s", i, err)
+ case int:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int16:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case int8:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case uint:
+ return uint8(s), nil
+ case uint64:
+ return uint8(s), nil
+ case uint32:
+ return uint8(s), nil
+ case uint16:
+ return uint8(s), nil
+ case uint8:
+ return s, nil
+ case float64:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case float32:
+ if s < 0 {
+ return 0, errNegativeNotAllowed
+ }
+ return uint8(s), nil
+ case bool:
+ if s {
+ return 1, nil
+ }
+ return 0, nil
+ case nil:
+ return 0, nil
+ default:
+ return 0, fmt.Errorf("unable to cast %#v of type %T to uint8", i, i)
}
}
@@ -225,7 +794,7 @@ func indirectToStringerOrError(a interface{}) interface{} {
return v.Interface()
}
-// ToStringE casts an empty interface to a string.
+// ToStringE casts an interface to a string type.
func ToStringE(i interface{}) (string, error) {
i = indirectToStringerOrError(i)
@@ -235,11 +804,29 @@ func ToStringE(i interface{}) (string, error) {
case bool:
return strconv.FormatBool(s), nil
case float64:
- return strconv.FormatFloat(i.(float64), 'f', -1, 64), nil
- case int64:
- return strconv.FormatInt(i.(int64), 10), nil
+ return strconv.FormatFloat(s, 'f', -1, 64), nil
+ case float32:
+ return strconv.FormatFloat(float64(s), 'f', -1, 32), nil
case int:
- return strconv.FormatInt(int64(i.(int)), 10), nil
+ return strconv.Itoa(s), nil
+ case int64:
+ return strconv.FormatInt(s, 10), nil
+ case int32:
+ return strconv.Itoa(int(s)), nil
+ case int16:
+ return strconv.FormatInt(int64(s), 10), nil
+ case int8:
+ return strconv.FormatInt(int64(s), 10), nil
+ case uint:
+ return strconv.FormatInt(int64(s), 10), nil
+ case uint64:
+ return strconv.FormatInt(int64(s), 10), nil
+ case uint32:
+ return strconv.FormatInt(int64(s), 10), nil
+ case uint16:
+ return strconv.FormatInt(int64(s), 10), nil
+ case uint8:
+ return strconv.FormatInt(int64(s), 10), nil
case []byte:
return string(s), nil
case template.HTML:
@@ -259,13 +846,12 @@ func ToStringE(i interface{}) (string, error) {
case error:
return s.Error(), nil
default:
- return "", fmt.Errorf("Unable to Cast %#v to string", i)
+ return "", fmt.Errorf("unable to cast %#v of type %T to string", i, i)
}
}
-// ToStringMapStringE casts an empty interface to a map[string]string.
+// ToStringMapStringE casts an interface to a map[string]string type.
func ToStringMapStringE(i interface{}) (map[string]string, error) {
-
var m = map[string]string{}
switch v := i.(type) {
@@ -287,13 +873,12 @@ func ToStringMapStringE(i interface{}) (map[string]string, error) {
}
return m, nil
default:
- return m, fmt.Errorf("Unable to Cast %#v to map[string]string", i)
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]string", i, i)
}
}
-// ToStringMapStringSliceE casts an empty interface to a map[string][]string.
+// ToStringMapStringSliceE casts an interface to a map[string][]string type.
func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
-
var m = map[string][]string{}
switch v := i.(type) {
@@ -339,23 +924,22 @@ func ToStringMapStringSliceE(i interface{}) (map[string][]string, error) {
for k, val := range v {
key, err := ToStringE(k)
if err != nil {
- return m, fmt.Errorf("Unable to Cast %#v to map[string][]string", i)
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
}
value, err := ToStringSliceE(val)
if err != nil {
- return m, fmt.Errorf("Unable to Cast %#v to map[string][]string", i)
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
}
m[key] = value
}
default:
- return m, fmt.Errorf("Unable to Cast %#v to map[string][]string", i)
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string][]string", i, i)
}
return m, nil
}
-// ToStringMapBoolE casts an empty interface to a map[string]bool.
+// ToStringMapBoolE casts an interface to a map[string]bool type.
func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
-
var m = map[string]bool{}
switch v := i.(type) {
@@ -372,13 +956,12 @@ func ToStringMapBoolE(i interface{}) (map[string]bool, error) {
case map[string]bool:
return v, nil
default:
- return m, fmt.Errorf("Unable to Cast %#v to map[string]bool", i)
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]bool", i, i)
}
}
-// ToStringMapE casts an empty interface to a map[string]interface{}.
+// ToStringMapE casts an interface to a map[string]interface{} type.
func ToStringMapE(i interface{}) (map[string]interface{}, error) {
-
var m = map[string]interface{}{}
switch v := i.(type) {
@@ -390,36 +973,31 @@ func ToStringMapE(i interface{}) (map[string]interface{}, error) {
case map[string]interface{}:
return v, nil
default:
- return m, fmt.Errorf("Unable to Cast %#v to map[string]interface{}", i)
+ return m, fmt.Errorf("unable to cast %#v of type %T to map[string]interface{}", i, i)
}
}
-// ToSliceE casts an empty interface to a []interface{}.
+// ToSliceE casts an interface to a []interface{} type.
func ToSliceE(i interface{}) ([]interface{}, error) {
-
var s []interface{}
switch v := i.(type) {
case []interface{}:
- for _, u := range v {
- s = append(s, u)
- }
- return s, nil
+ return append(s, v...), nil
case []map[string]interface{}:
for _, u := range v {
s = append(s, u)
}
return s, nil
default:
- return s, fmt.Errorf("Unable to Cast %#v of type %v to []interface{}", i, reflect.TypeOf(i))
+ return s, fmt.Errorf("unable to cast %#v of type %T to []interface{}", i, i)
}
}
-// ToBoolSliceE casts an empty interface to a []bool.
+// ToBoolSliceE casts an interface to a []bool type.
func ToBoolSliceE(i interface{}) ([]bool, error) {
-
if i == nil {
- return []bool{}, fmt.Errorf("Unable to Cast %#v to []bool", i)
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
}
switch v := i.(type) {
@@ -435,19 +1013,18 @@ func ToBoolSliceE(i interface{}) ([]bool, error) {
for j := 0; j < s.Len(); j++ {
val, err := ToBoolE(s.Index(j).Interface())
if err != nil {
- return []bool{}, fmt.Errorf("Unable to Cast %#v to []bool", i)
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
}
a[j] = val
}
return a, nil
default:
- return []bool{}, fmt.Errorf("Unable to Cast %#v to []bool", i)
+ return []bool{}, fmt.Errorf("unable to cast %#v of type %T to []bool", i, i)
}
}
-// ToStringSliceE casts an empty interface to a []string.
+// ToStringSliceE casts an interface to a []string type.
func ToStringSliceE(i interface{}) ([]string, error) {
-
var a []string
switch v := i.(type) {
@@ -463,19 +1040,18 @@ func ToStringSliceE(i interface{}) ([]string, error) {
case interface{}:
str, err := ToStringE(v)
if err != nil {
- return a, fmt.Errorf("Unable to Cast %#v to []string", i)
+ return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
}
return []string{str}, nil
default:
- return a, fmt.Errorf("Unable to Cast %#v to []string", i)
+ return a, fmt.Errorf("unable to cast %#v of type %T to []string", i, i)
}
}
-// ToIntSliceE casts an empty interface to a []int.
+// ToIntSliceE casts an interface to a []int type.
func ToIntSliceE(i interface{}) ([]int, error) {
-
if i == nil {
- return []int{}, fmt.Errorf("Unable to Cast %#v to []int", i)
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
}
switch v := i.(type) {
@@ -491,17 +1067,19 @@ func ToIntSliceE(i interface{}) ([]int, error) {
for j := 0; j < s.Len(); j++ {
val, err := ToIntE(s.Index(j).Interface())
if err != nil {
- return []int{}, fmt.Errorf("Unable to Cast %#v to []int", i)
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
}
a[j] = val
}
return a, nil
default:
- return []int{}, fmt.Errorf("Unable to Cast %#v to []int", i)
+ return []int{}, fmt.Errorf("unable to cast %#v of type %T to []int", i, i)
}
}
-// StringToDate casts an empty interface to a time.Time.
+// StringToDate attempts to parse a string into a time.Time type using a
+// predefined list of formats. If no suitable format is found, an error is
+// returned.
func StringToDate(s string) (time.Time, error) {
return parseDateWith(s, []string{
time.RFC3339,
@@ -519,6 +1097,7 @@ func StringToDate(s string) (time.Time, error) {
"02 Jan 2006",
"2006-01-02 15:04:05 -07:00",
"2006-01-02 15:04:05 -0700",
+ "2006-01-02 15:04:05Z07:00", // RFC3339 without T
"2006-01-02 15:04:05",
time.Kitchen,
time.Stamp,
@@ -534,5 +1113,5 @@ func parseDateWith(s string, dates []string) (d time.Time, e error) {
return
}
}
- return d, fmt.Errorf("Unable to parse date: %s", s)
+ return d, fmt.Errorf("unable to parse date: %s", s)
}
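
A hedged sketch of the error-returning variants, showing the new negative-value check and the extra date layout; the sample values are illustrative:

package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// The *E variants surface the conversion error instead of hiding it.
	if _, err := cast.ToUint64E(-5); err != nil {
		fmt.Println(err) // unable to cast negative value
	}

	// Unknown types now report the Go type in the error message.
	if _, err := cast.ToIntE(struct{}{}); err != nil {
		fmt.Println(err)
	}

	// StringToDate tries a fixed list of layouts, now including
	// "2006-01-02 15:04:05Z07:00" (RFC3339 without the T).
	t, err := cast.StringToDate("2017-04-10 21:18:42Z")
	fmt.Println(t, err)
}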
diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go
index 746af63..3a2e255 100644
--- a/vendor/github.com/spf13/pflag/flag.go
+++ b/vendor/github.com/spf13/pflag/flag.go
@@ -134,10 +134,16 @@ type FlagSet struct {
// a custom error handler.
Usage func()
+ // SortFlags indicates whether the flags should be sorted in
+ // help/usage messages.
+ SortFlags bool
+
name string
parsed bool
actual map[NormalizedName]*Flag
+ orderedActual []*Flag
formal map[NormalizedName]*Flag
+ orderedFormal []*Flag
shorthands map[byte]*Flag
args []string // arguments after flags
argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
@@ -156,7 +162,7 @@ type Flag struct {
Value Value // value as set
DefValue string // default value (as text); for usage message
Changed bool // If the user set the value (or if left to default)
- NoOptDefVal string //default value (as text); if the flag is on the command line without any options
+ NoOptDefVal string // default value (as text); if the flag is on the command line without any options
Deprecated string // If this flag is deprecated, this string is the new or now thing to use
Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text
ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use
@@ -194,11 +200,12 @@ func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
// "--getUrl" which may also be translated to "geturl" and everything will work.
func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
f.normalizeNameFunc = n
- for k, v := range f.formal {
- delete(f.formal, k)
- nname := f.normalizeFlagName(string(k))
- f.formal[nname] = v
+ for k, v := range f.orderedFormal {
+ delete(f.formal, NormalizedName(v.Name))
+ nname := f.normalizeFlagName(v.Name)
v.Name = string(nname)
+ f.formal[nname] = v
+ f.orderedFormal[k] = v
}
}
@@ -229,10 +236,18 @@ func (f *FlagSet) SetOutput(output io.Writer) {
f.output = output
}
-// VisitAll visits the flags in lexicographical order, calling fn for each.
+// VisitAll visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
// It visits all flags, even those not set.
func (f *FlagSet) VisitAll(fn func(*Flag)) {
- for _, flag := range sortFlags(f.formal) {
+ var flags []*Flag
+ if f.SortFlags {
+ flags = sortFlags(f.formal)
+ } else {
+ flags = f.orderedFormal
+ }
+
+ for _, flag := range flags {
fn(flag)
}
}
@@ -253,22 +268,32 @@ func (f *FlagSet) HasAvailableFlags() bool {
return false
}
-// VisitAll visits the command-line flags in lexicographical order, calling
-// fn for each. It visits all flags, even those not set.
+// VisitAll visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits all flags, even those not set.
func VisitAll(fn func(*Flag)) {
CommandLine.VisitAll(fn)
}
-// Visit visits the flags in lexicographical order, calling fn for each.
+// Visit visits the flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
// It visits only those flags that have been set.
func (f *FlagSet) Visit(fn func(*Flag)) {
- for _, flag := range sortFlags(f.actual) {
+ var flags []*Flag
+ if f.SortFlags {
+ flags = sortFlags(f.actual)
+ } else {
+ flags = f.orderedActual
+ }
+
+ for _, flag := range flags {
fn(flag)
}
}
-// Visit visits the command-line flags in lexicographical order, calling fn
-// for each. It visits only those flags that have been set.
+// Visit visits the command-line flags in lexicographical order or
+// in primordial order if f.SortFlags is false, calling fn for each.
+// It visits only those flags that have been set.
func Visit(fn func(*Flag)) {
CommandLine.Visit(fn)
}
@@ -373,6 +398,7 @@ func (f *FlagSet) Set(name, value string) error {
f.actual = make(map[NormalizedName]*Flag)
}
f.actual[normalName] = flag
+ f.orderedActual = append(f.orderedActual, flag)
flag.Changed = true
if len(flag.Deprecated) > 0 {
fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
@@ -601,7 +627,7 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string {
line += usage
if !flag.defaultIsZeroValue() {
if flag.Value.Type() == "string" {
- line += fmt.Sprintf(" (default \"%s\")", flag.DefValue)
+ line += fmt.Sprintf(" (default %q)", flag.DefValue)
} else {
line += fmt.Sprintf(" (default %s)", flag.DefValue)
}
@@ -729,6 +755,7 @@ func (f *FlagSet) AddFlag(flag *Flag) {
flag.Name = string(normalizedFlagName)
f.formal[normalizedFlagName] = flag
+ f.orderedFormal = append(f.orderedFormal, flag)
if len(flag.Shorthand) == 0 {
return
@@ -807,6 +834,7 @@ func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error {
f.actual = make(map[NormalizedName]*Flag)
}
f.actual[f.normalizeFlagName(flag.Name)] = flag
+ f.orderedActual = append(f.orderedActual, flag)
flag.Changed = true
if len(flag.Deprecated) > 0 {
fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
@@ -1036,14 +1064,15 @@ func Parsed() bool {
// CommandLine is the default set of command-line flags, parsed from os.Args.
var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
-// NewFlagSet returns a new, empty flag set with the specified name and
-// error handling property.
+// NewFlagSet returns a new, empty flag set with the specified name,
+// error handling property and SortFlags set to true.
func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
f := &FlagSet{
name: name,
errorHandling: errorHandling,
argsLenAtDash: -1,
interspersed: true,
+ SortFlags: true,
}
return f
}
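
A short sketch of the new SortFlags switch; flag names and usage strings are placeholders:

package main

import (
	"os"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ExitOnError)
	// NewFlagSet enables SortFlags by default; turn it off to keep the
	// flags in the order they were defined when printing help.
	fs.SortFlags = false

	fs.String("zebra", "", "defined first, printed first")
	fs.Bool("alpha", false, "defined second, printed second")

	fs.Parse(os.Args[1:])
	fs.PrintDefaults()
}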
diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go
index fce13b1..5ca66ae 100644
--- a/vendor/github.com/spf13/viper/viper.go
+++ b/vendor/github.com/spf13/viper/viper.go
@@ -40,6 +40,11 @@ import (
var v *Viper
+type RemoteResponse struct {
+ Value []byte
+ Error error
+}
+
func init() {
v = New()
}
@@ -47,6 +52,7 @@ func init() {
type remoteConfigFactory interface {
Get(rp RemoteProvider) (io.Reader, error)
Watch(rp RemoteProvider) (io.Reader, error)
+ WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool)
}
// RemoteConfig is optional, see the remote package
@@ -713,7 +719,15 @@ func (v *Viper) GetSizeInBytes(key string) uint {
// UnmarshalKey takes a single key and unmarshals it into a Struct.
func UnmarshalKey(key string, rawVal interface{}) error { return v.UnmarshalKey(key, rawVal) }
func (v *Viper) UnmarshalKey(key string, rawVal interface{}) error {
- return mapstructure.Decode(v.Get(key), rawVal)
+ err := decode(v.Get(key), defaultDecoderConfig(rawVal))
+
+ if err != nil {
+ return err
+ }
+
+ v.insensitiviseMaps()
+
+ return nil
}
// Unmarshal unmarshals the config into a Struct. Make sure that the tags
@@ -1255,6 +1269,10 @@ func (v *Viper) WatchRemoteConfig() error {
return v.watchKeyValueConfig()
}
+func (v *Viper) WatchRemoteConfigOnChannel() error {
+ return v.watchKeyValueConfigOnChannel()
+}
+
 // Unmarshal a Reader into a map.
// Should probably be an unexported function.
func unmarshalReader(in io.Reader, c map[string]interface{}) error {
@@ -1299,6 +1317,23 @@ func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}
}
// Retrieve the first found remote configuration.
+func (v *Viper) watchKeyValueConfigOnChannel() error {
+ for _, rp := range v.remoteProviders {
+ respc, _ := RemoteConfig.WatchChannel(rp)
+ // TODO: add a quit channel
+ go func(rc <-chan *RemoteResponse) {
+ for {
+ b := <-rc
+ reader := bytes.NewReader(b.Value)
+ v.unmarshalReader(reader, v.kvstore)
+ }
+ }(respc)
+ return nil
+ }
+ return RemoteConfigError("No Files Found")
+}
+
+// Retrieve the first found remote configuration.
func (v *Viper) watchKeyValueConfig() error {
for _, rp := range v.remoteProviders {
val, err := v.watchRemoteConfig(rp)
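
A hedged sketch of the new channel-based remote watcher; the etcd endpoint, key path and config key are placeholders, and the blank import of viper/remote is assumed to be required to enable remote support:

package main

import (
	"log"
	"time"

	"github.com/spf13/viper"
	_ "github.com/spf13/viper/remote" // registers the remote config backend
)

func main() {
	v := viper.New()
	if err := v.AddRemoteProvider("etcd", "http://127.0.0.1:2379", "/config/myapp.json"); err != nil {
		log.Fatal(err)
	}
	v.SetConfigType("json")

	if err := v.ReadRemoteConfig(); err != nil {
		log.Fatal(err)
	}

	// WatchRemoteConfigOnChannel keeps the in-memory kvstore updated in the
	// background as the remote value changes.
	if err := v.WatchRemoteConfigOnChannel(); err != nil {
		log.Fatal(err)
	}

	for {
		time.Sleep(5 * time.Second)
		log.Println("current value:", v.GetString("some.key"))
	}
}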
diff --git a/vendor/github.com/xanzy/go-gitlab/README.md b/vendor/github.com/xanzy/go-gitlab/README.md
index 919b97f..7b6d1bc 100644
--- a/vendor/github.com/xanzy/go-gitlab/README.md
+++ b/vendor/github.com/xanzy/go-gitlab/README.md
@@ -7,11 +7,8 @@ A GitLab API client enabling Go programs to interact with GitLab in a simple and
## NOTE
-Release v0.2.0 (released on 26-07-2016), is unfortunately backwards incompatible. We
-understand very well that this will cause some additional work in order to get your
-code working again, but we believe this is a necessary eval to improve functionality
-and fix some use cases (see [GH-29](https://github.com/xanzy/go-gitlab/issues/29) and
-[GH-53](https://github.com/xanzy/go-gitlab/issues/53)).
+Release v0.5.0 (released on 22-03-2017) no longer supports Go versions older
+than 1.7.x. If you want (or need) to use an older Go version, please use v0.4.1.
## Coverage
diff --git a/vendor/github.com/xanzy/go-gitlab/branches.go b/vendor/github.com/xanzy/go-gitlab/branches.go
index adb4c73..838245e 100644
--- a/vendor/github.com/xanzy/go-gitlab/branches.go
+++ b/vendor/github.com/xanzy/go-gitlab/branches.go
@@ -24,37 +24,50 @@ import (
// BranchesService handles communication with the branch related methods
// of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/branches.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md
type BranchesService struct {
client *Client
}
// Branch represents a GitLab branch.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/branches.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md
type Branch struct {
- Commit *Commit `json:"commit"`
- Name string `json:"name"`
- Protected bool `json:"protected"`
+ Commit *Commit `json:"commit"`
+ Name string `json:"name"`
+ Protected bool `json:"protected"`
+ Merged bool `json:"merged"`
+ DevelopersCanPush bool `json:"developers_can_push"`
+ DevelopersCanMerge bool `json:"developers_can_merge"`
}
func (b Branch) String() string {
return Stringify(b)
}
+// ListBranchesOptions represents the available ListBranches() options.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#list-repository-branches
+type ListBranchesOptions struct {
+ ListOptions
+}
+
// ListBranches gets a list of repository branches from a project, sorted by
// name alphabetically.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/branches.html#list-repository-branches
-func (s *BranchesService) ListBranches(pid interface{}, options ...OptionFunc) ([]*Branch, *Response, error) {
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#list-repository-branches
+func (s *BranchesService) ListBranches(pid interface{}, opts *ListBranchesOptions, options ...OptionFunc) ([]*Branch, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/repository/branches", url.QueryEscape(project))
- req, err := s.client.NewRequest("GET", u, nil, options)
+ req, err := s.client.NewRequest("GET", u, opts, options)
if err != nil {
return nil, nil, err
}
@@ -71,7 +84,7 @@ func (s *BranchesService) ListBranches(pid interface{}, options ...OptionFunc) (
// GetBranch gets a single project repository branch.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/branches.html#get-single-repository-branch
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#get-single-repository-branch
func (s *BranchesService) GetBranch(pid interface{}, branch string, options ...OptionFunc) (*Branch, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -93,20 +106,29 @@ func (s *BranchesService) GetBranch(pid interface{}, branch string, options ...O
return b, resp, err
}
+// ProtectBranchOptions represents the available ProtectBranch() options.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#protect-repository-branch
+type ProtectBranchOptions struct {
+ DevelopersCanPush *bool `url:"developers_can_push,omitempty" json:"developers_can_push,omitempty"`
+ DevelopersCanMerge *bool `url:"developers_can_merge,omitempty" json:"developers_can_merge,omitempty"`
+}
+
// ProtectBranch protects a single project repository branch. This is an
// idempotent function, protecting an already protected repository branch
// still returns a 200 OK status code.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/branches.html#protect-repository-branch
-func (s *BranchesService) ProtectBranch(pid interface{}, branch string, options ...OptionFunc) (*Branch, *Response, error) {
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#protect-repository-branch
+func (s *BranchesService) ProtectBranch(pid interface{}, branch string, opts *ProtectBranchOptions, options ...OptionFunc) (*Branch, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/repository/branches/%s/protect", url.QueryEscape(project), branch)
- req, err := s.client.NewRequest("PUT", u, nil, options)
+ req, err := s.client.NewRequest("PUT", u, opts, options)
if err != nil {
return nil, nil, err
}
@@ -125,7 +147,7 @@ func (s *BranchesService) ProtectBranch(pid interface{}, branch string, options
// still returns a 200 OK status code.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/branches.html#unprotect-repository-branch
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#unprotect-repository-branch
func (s *BranchesService) UnprotectBranch(pid interface{}, branch string, options ...OptionFunc) (*Branch, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -150,7 +172,7 @@ func (s *BranchesService) UnprotectBranch(pid interface{}, branch string, option
// CreateBranchOptions represents the available CreateBranch() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/branches.html#create-repository-branch
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#create-repository-branch
type CreateBranchOptions struct {
BranchName *string `url:"branch_name,omitempty" json:"branch_name,omitempty"`
Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
@@ -159,7 +181,7 @@ type CreateBranchOptions struct {
// CreateBranch creates branch from commit SHA or existing branch.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/branches.html#create-repository-branch
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#create-repository-branch
func (s *BranchesService) CreateBranch(pid interface{}, opt *CreateBranchOptions, options ...OptionFunc) (*Branch, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -184,7 +206,7 @@ func (s *BranchesService) CreateBranch(pid interface{}, opt *CreateBranchOptions
// DeleteBranch deletes an existing branch.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/branches.html#delete-repository-branch
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#delete-repository-branch
func (s *BranchesService) DeleteBranch(pid interface{}, branch string, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -199,3 +221,22 @@ func (s *BranchesService) DeleteBranch(pid interface{}, branch string, options .
return s.client.Do(req, nil)
}
+
+// DeleteMergedBranches deletes all branches that are merged into the project's default branch.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/branches.md#delete-merged-branches
+func (s *BranchesService) DeleteMergedBranches(pid interface{}, options ...OptionFunc) (*Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf("projects/%s/repository/merged_branches", url.QueryEscape(project))
+
+ req, err := s.client.NewRequest("DELETE", u, nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
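
A hedged sketch of the updated Branches API, using the new ListBranchesOptions and ProtectBranchOptions; the token, project path and branch name are placeholders:

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "your-token")

	// ListBranches now accepts options, so results can be paginated.
	opts := &gitlab.ListBranchesOptions{
		ListOptions: gitlab.ListOptions{Page: 1, PerPage: 20},
	}
	branches, _, err := git.Branches.ListBranches("group/project", opts)
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range branches {
		fmt.Println(b.Name, "merged:", b.Merged)
	}

	// ProtectBranch can now control who may push and merge.
	_, _, err = git.Branches.ProtectBranch("group/project", "master", &gitlab.ProtectBranchOptions{
		DevelopersCanPush:  gitlab.Bool(false),
		DevelopersCanMerge: gitlab.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
}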
diff --git a/vendor/github.com/xanzy/go-gitlab/build_variables.go b/vendor/github.com/xanzy/go-gitlab/build_variables.go
index a1bdf7f..a966825 100644
--- a/vendor/github.com/xanzy/go-gitlab/build_variables.go
+++ b/vendor/github.com/xanzy/go-gitlab/build_variables.go
@@ -8,14 +8,14 @@ import (
// BuildVariablesService handles communication with the project variables related methods
// of the Gitlab API
//
-// Gitlab API Docs : https://docs.gitlab.com/ce/api/build_variables.html
+// Gitlab API Docs : https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md
type BuildVariablesService struct {
client *Client
}
// BuildVariable represents a variable available for each build of the given project
//
-// Gitlab API Docs : https://docs.gitlab.com/ce/api/build_variables.html
+// Gitlab API Docs : https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md
type BuildVariable struct {
Key string `json:"key"`
Value string `json:"value"`
@@ -25,18 +25,26 @@ func (v BuildVariable) String() string {
return Stringify(v)
}
+// ListBuildVariablesOptions are the parameters to ListBuildVariables()
+//
+// Gitlab API Docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md#list-project-variables
+type ListBuildVariablesOptions struct {
+ ListOptions
+}
+
 // ListBuildVariables gets a list of project variables in a project
//
// Gitlab API Docs:
-// https://docs.gitlab.com/ce/api/build_variables.html#list-project-variables
-func (s *BuildVariablesService) ListBuildVariables(pid interface{}, options ...OptionFunc) ([]*BuildVariable, *Response, error) {
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md#list-project-variables
+func (s *BuildVariablesService) ListBuildVariables(pid interface{}, opts *ListBuildVariablesOptions, options ...OptionFunc) ([]*BuildVariable, *Response, error) {
project, err := parseID(pid)
if err != nil {
return nil, nil, err
}
u := fmt.Sprintf("projects/%s/variables", url.QueryEscape(project))
- req, err := s.client.NewRequest("GET", u, nil, options)
+ req, err := s.client.NewRequest("GET", u, opts, options)
if err != nil {
return nil, nil, err
}
@@ -53,7 +61,7 @@ func (s *BuildVariablesService) ListBuildVariables(pid interface{}, options ...O
// GetBuildVariable gets a single project variable of a project
//
// Gitlab API Docs:
-// https://docs.gitlab.com/ce/api/build_variables.html#show-variable-details
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md#show-variable-details
func (s *BuildVariablesService) GetBuildVariable(pid interface{}, key string, options ...OptionFunc) (*BuildVariable, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -78,7 +86,7 @@ func (s *BuildVariablesService) GetBuildVariable(pid interface{}, key string, op
// CreateBuildVariable creates a variable for a given project
//
// Gitlab API Docs:
-// https://docs.gitlab.com/ce/api/build_variables.html#create-variable
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md#create-variable
func (s *BuildVariablesService) CreateBuildVariable(pid interface{}, key, value string, options ...OptionFunc) (*BuildVariable, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -104,7 +112,7 @@ func (s *BuildVariablesService) CreateBuildVariable(pid interface{}, key, value
// The variable key must exist
//
// Gitlab API Docs:
-// https://docs.gitlab.com/ce/api/build_variables.html#update-variable
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md#update-variable
func (s *BuildVariablesService) UpdateBuildVariable(pid interface{}, key, value string, options ...OptionFunc) (*BuildVariable, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -129,7 +137,7 @@ func (s *BuildVariablesService) UpdateBuildVariable(pid interface{}, key, value
// RemoveBuildVariable removes a project variable of a given project identified by its key
//
// Gitlab API Docs:
-// https://docs.gitlab.com/ce/api/build_variables.html#remove-variable
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_variables.md#remove-variable
func (s *BuildVariablesService) RemoveBuildVariable(pid interface{}, key string, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
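The hunk above changes ListBuildVariables to accept a *ListBuildVariablesOptions, so callers can page through a project's variables. A minimal usage sketch under stated assumptions: the package's NewClient(nil, token) constructor and the BuildVariables service field on the client are taken from the rest of the library and do not appear in this diff.

package main

import (
	"fmt"
	"log"

	"github.com/xanzy/go-gitlab"
)

func main() {
	// Assumed constructor: NewClient(nil, token) uses http.DefaultClient.
	git := gitlab.NewClient(nil, "PRIVATE-TOKEN")

	// Page through a project's build variables, 20 per page.
	opts := &gitlab.ListBuildVariablesOptions{
		ListOptions: gitlab.ListOptions{Page: 1, PerPage: 20},
	}
	vars, _, err := git.BuildVariables.ListBuildVariables("group/project", opts)
	if err != nil {
		log.Fatal(err)
	}
	for _, v := range vars {
		fmt.Println(v.Key)
	}
}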
diff --git a/vendor/github.com/xanzy/go-gitlab/builds.go b/vendor/github.com/xanzy/go-gitlab/builds.go
index dfee85b..9396729 100644
--- a/vendor/github.com/xanzy/go-gitlab/builds.go
+++ b/vendor/github.com/xanzy/go-gitlab/builds.go
@@ -33,14 +33,16 @@ type ListBuildsOptions struct {
// BuildsService handles communication with the ci builds related methods
// of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/builds.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md
type BuildsService struct {
client *Client
}
// Build represents a ci build.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/builds.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md
type Build struct {
Commit *Commit `json:"commit"`
CreatedAt *time.Time `json:"created_at"`
@@ -72,7 +74,7 @@ type Build struct {
// failed, success, canceled; showing all builds if none provided.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/builds.html#list-project-builds
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#list-project-builds
func (s *BuildsService) ListProjectBuilds(pid interface{}, opts *ListBuildsOptions, options ...OptionFunc) ([]Build, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -98,7 +100,7 @@ func (s *BuildsService) ListProjectBuilds(pid interface{}, opts *ListBuildsOptio
// project. If the commit SHA is not found, it will respond with 404.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/builds.html#list-commit-builds
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#list-commit-builds
func (s *BuildsService) ListCommitBuilds(pid interface{}, sha string, opts *ListBuildsOptions, options ...OptionFunc) ([]Build, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -123,7 +125,7 @@ func (s *BuildsService) ListCommitBuilds(pid interface{}, sha string, opts *List
// GetBuild gets a single build of a project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/builds.html#get-a-single-build
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#get-a-single-build
func (s *BuildsService) GetBuild(pid interface{}, buildID int, options ...OptionFunc) (*Build, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -148,7 +150,7 @@ func (s *BuildsService) GetBuild(pid interface{}, buildID int, options ...Option
// GetBuildArtifacts gets the build artifacts of a project
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/builds.html#get-build-artifacts
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#get-build-artifacts
func (s *BuildsService) GetBuildArtifacts(pid interface{}, buildID int, options ...OptionFunc) (io.Reader, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -174,7 +176,7 @@ func (s *BuildsService) GetBuildArtifacts(pid interface{}, buildID int, options
// reference name and job provided the build finished successfully.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/builds.html#download-the-artifacts-file
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#download-the-artifacts-file
func (s *BuildsService) DownloadArtifactsFile(pid interface{}, refName string, job string, options ...OptionFunc) (io.Reader, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -199,7 +201,7 @@ func (s *BuildsService) DownloadArtifactsFile(pid interface{}, refName string, j
// GetTraceFile gets a trace of a specific build of a project
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/builds.html#get-a-trace-file
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#get-a-trace-file
func (s *BuildsService) GetTraceFile(pid interface{}, buildID int, options ...OptionFunc) (io.Reader, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -224,7 +226,7 @@ func (s *BuildsService) GetTraceFile(pid interface{}, buildID int, options ...Op
// CancelBuild cancels a single build of a project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/builds.html#cancel-a-build
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#cancel-a-build
func (s *BuildsService) CancelBuild(pid interface{}, buildID int, options ...OptionFunc) (*Build, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -249,7 +251,7 @@ func (s *BuildsService) CancelBuild(pid interface{}, buildID int, options ...Opt
// RetryBuild retries a single build of a project
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/builds.html#retry-a-build
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#retry-a-build
func (s *BuildsService) RetryBuild(pid interface{}, buildID int, options ...OptionFunc) (*Build, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -275,7 +277,7 @@ func (s *BuildsService) RetryBuild(pid interface{}, buildID int, options ...Opti
// artifacts and a build trace.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/builds.html#erase-a-build
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#erase-a-build
func (s *BuildsService) EraseBuild(pid interface{}, buildID int, options ...OptionFunc) (*Build, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -301,7 +303,7 @@ func (s *BuildsService) EraseBuild(pid interface{}, buildID int, options ...Opti
// expiration is set.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/builds.html#keep-artifacts
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#keep-artifacts
func (s *BuildsService) KeepArtifacts(pid interface{}, buildID int, options ...OptionFunc) (*Build, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -326,7 +328,7 @@ func (s *BuildsService) KeepArtifacts(pid interface{}, buildID int, options ...O
// PlayBuild triggers a manual action to start a build.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/builds.html#play-a-build
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/builds.md#play-a-build
func (s *BuildsService) PlayBuild(pid interface{}, buildID int, options ...OptionFunc) (*Build, *Response, error) {
project, err := parseID(pid)
if err != nil {
diff --git a/vendor/github.com/xanzy/go-gitlab/commits.go b/vendor/github.com/xanzy/go-gitlab/commits.go
index 7860f58..e2e7de8 100644
--- a/vendor/github.com/xanzy/go-gitlab/commits.go
+++ b/vendor/github.com/xanzy/go-gitlab/commits.go
@@ -25,25 +25,41 @@ import (
// CommitsService handles communication with the commit related methods
// of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md
type CommitsService struct {
client *Client
}
// Commit represents a GitLab commit.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md
type Commit struct {
- ID string `json:"id"`
- ShortID string `json:"short_id"`
- Title string `json:"title"`
- AuthorName string `json:"author_name"`
- AuthorEmail string `json:"author_email"`
- AuthoredDate *time.Time `json:"authored_date"`
- CommittedDate *time.Time `json:"committed_date"`
- CreatedAt *time.Time `json:"created_at"`
- Message string `json:"message"`
- ParentsIds []string `json:"parents_ids"`
+ ID string `json:"id"`
+ ShortID string `json:"short_id"`
+ Title string `json:"title"`
+ AuthorName string `json:"author_name"`
+ AuthorEmail string `json:"author_email"`
+ AuthoredDate *time.Time `json:"authored_date"`
+ CommitterName string `json:"committer_name"`
+ CommitterEmail string `json:"committer_email"`
+ CommittedDate *time.Time `json:"committed_date"`
+ CreatedAt *time.Time `json:"created_at"`
+ Message string `json:"message"`
+ ParentIDs []string `json:"parent_ids"`
+ Stats *CommitStats `json:"stats"`
+ Status *BuildState `json:"status"`
+}
+
+// CommitStats represents the number of added and deleted files in a commit.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md
+type CommitStats struct {
+ Additions int `json:"additions"`
+ Deletions int `json:"deletions"`
+ Total int `json:"total"`
}
func (c Commit) String() string {
@@ -52,7 +68,8 @@ func (c Commit) String() string {
// ListCommitsOptions represents the available ListCommits() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#list-repository-commits
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#list-repository-commits
type ListCommitsOptions struct {
ListOptions
RefName *string `url:"ref_name,omitempty" json:"ref_name,omitempty"`
@@ -62,7 +79,8 @@ type ListCommitsOptions struct {
// ListCommits gets a list of repository commits in a project.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#list-commits
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#list-commits
func (s *CommitsService) ListCommits(pid interface{}, opt *ListCommitsOptions, options ...OptionFunc) ([]*Commit, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -84,10 +102,71 @@ func (s *CommitsService) ListCommits(pid interface{}, opt *ListCommitsOptions, o
return c, resp, err
}
+// FileAction represents the available actions that can be performed on a file.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#create-a-commit-with-multiple-files-and-actions
+type FileAction string
+
+// The available file actions.
+const (
+ FileCreate FileAction = "create"
+ FileDelete FileAction = "delete"
+ FileMove FileAction = "move"
+ FileUpdate FileAction = "update"
+)
+
+// CommitAction represents a single file action within a commit.
+type CommitAction struct {
+ Action FileAction `url:"action" json:"action,omitempty"`
+ FilePath string `url:"file_path" json:"file_path,omitempty"`
+ PreviousPath string `url:"previous_path,omitempty" json:"previous_path,omitempty"`
+ Content string `url:"content,omitempty" json:"content,omitempty"`
+ Encoding string `url:"encoding,omitempty" json:"encoding,omitempty"`
+}
+
+// CreateCommitOptions represents the available options for a new commit.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#create-a-commit-with-multiple-files-and-actions
+type CreateCommitOptions struct {
+ BranchName *string `url:"branch_name" json:"branch_name,omitempty"`
+ CommitMessage *string `url:"commit_message" json:"commit_message,omitempty"`
+ Actions []*CommitAction `url:"actions" json:"actions,omitempty"`
+ AuthorEmail *string `url:"author_email,omitempty" json:"author_email,omitempty"`
+ AuthorName *string `url:"author_name,omitempty" json:"author_name,omitempty"`
+}
+
+// CreateCommit creates a commit with multiple files and actions.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#create-a-commit-with-multiple-files-and-actions
+func (s *CommitsService) CreateCommit(pid interface{}, opt *CreateCommitOptions, options ...OptionFunc) (*Commit, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/repository/commits", url.QueryEscape(project))
+
+ req, err := s.client.NewRequest("POST", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var c *Commit
+ resp, err := s.client.Do(req, &c)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return c, resp, err
+}
+
// GetCommit gets a specific commit identified by the commit hash or name of a
// branch or tag.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#get-a-single-commit
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#get-a-single-commit
func (s *CommitsService) GetCommit(pid interface{}, sha string, options ...OptionFunc) (*Commit, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -111,7 +190,8 @@ func (s *CommitsService) GetCommit(pid interface{}, sha string, options ...Optio
// Diff represents a GitLab diff.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md
type Diff struct {
Diff string `json:"diff"`
NewPath string `json:"new_path"`
@@ -130,7 +210,7 @@ func (d Diff) String() string {
// GetCommitDiff gets the diff of a commit in a project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/commits.html#get-the-diff-of-a-commit
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#get-the-diff-of-a-commit
func (s *CommitsService) GetCommitDiff(pid interface{}, sha string, options ...OptionFunc) ([]*Diff, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -154,7 +234,8 @@ func (s *CommitsService) GetCommitDiff(pid interface{}, sha string, options ...O
// CommitComment represents a GitLab commit comment.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md
type CommitComment struct {
Note string `json:"note"`
Path string `json:"path"`
@@ -181,7 +262,7 @@ func (c CommitComment) String() string {
// GetCommitComments gets the comments of a commit in a project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/commits.html#get-the-comments-of-a-commit
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#get-the-comments-of-a-commit
func (s *CommitsService) GetCommitComments(pid interface{}, sha string, options ...OptionFunc) ([]*CommitComment, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -207,7 +288,7 @@ func (s *CommitsService) GetCommitComments(pid interface{}, sha string, options
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/commits.html#post-comment-to-commit
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#post-comment-to-commit
type PostCommitCommentOptions struct {
Note *string `url:"note,omitempty" json:"note,omitempty"`
Path *string `url:"path" json:"path"`
@@ -220,7 +301,7 @@ type PostCommitCommentOptions struct {
// line_old are required.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/commits.html#post-comment-to-commit
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#post-comment-to-commit
func (s *CommitsService) PostCommitComment(pid interface{}, sha string, opt *PostCommitCommentOptions, options ...OptionFunc) (*CommitComment, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -244,7 +325,8 @@ func (s *CommitsService) PostCommitComment(pid interface{}, sha string, opt *Pos
// GetCommitStatusesOptions represents the available GetCommitStatuses() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#get-the-status-of-a-commit
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#get-the-status-of-a-commit
type GetCommitStatusesOptions struct {
Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
Stage *string `url:"stage,omitempty" json:"stage,omitempty"`
@@ -254,7 +336,8 @@ type GetCommitStatusesOptions struct {
// CommitStatus represents a GitLab commit status.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#get-the-status-of-a-commit
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#get-the-status-of-a-commit
type CommitStatus struct {
ID int `json:"id"`
SHA string `json:"sha"`
@@ -271,7 +354,8 @@ type CommitStatus struct {
// GetCommitStatuses gets the statuses of a commit in a project.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#get-the-status-of-a-commit
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#get-the-status-of-a-commit
func (s *CommitsService) GetCommitStatuses(pid interface{}, sha string, opt *GetCommitStatusesOptions, options ...OptionFunc) ([]*CommitStatus, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -295,7 +379,8 @@ func (s *CommitsService) GetCommitStatuses(pid interface{}, sha string, opt *Get
// SetCommitStatusOptions represents the available SetCommitStatus() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#post-the-status-to-commit
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#post-the-status-to-commit
type SetCommitStatusOptions struct {
State BuildState `url:"state" json:"state"`
Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
@@ -305,10 +390,10 @@ type SetCommitStatusOptions struct {
Description *string `url:"description,omitempty" json:"description,omitempty"`
}
-// BuildState represents a GitLab build state
+// BuildState represents a GitLab build state.
type BuildState string
-// These constants represent all valid build states
+// These constants represent all valid build states.
const (
Pending BuildState = "pending"
Running BuildState = "running"
@@ -319,7 +404,8 @@ const (
// SetCommitStatus sets the status of a commit in a project.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/commits.html#post-the-status-to-commit
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#post-the-status-to-commit
func (s *CommitsService) SetCommitStatus(pid interface{}, sha string, opt *SetCommitStatusOptions, options ...OptionFunc) (*CommitStatus, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -340,3 +426,37 @@ func (s *CommitsService) SetCommitStatus(pid interface{}, sha string, opt *SetCo
return cs, resp, err
}
+
+// CherryPickCommitOptions represents the available options for cherry-picking a commit.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#cherry-pick-a-commit
+type CherryPickCommitOptions struct {
+ TargetBranch *string `url:"branch" json:"branch,omitempty"`
+}
+
+// CherryPickCommit cherry-picks a commit to a given branch.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/commits.md#cherry-pick-a-commit
+func (s *CommitsService) CherryPickCommit(pid interface{}, sha string, opt *CherryPickCommitOptions, options ...OptionFunc) (*Commit, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/repository/commits/%s/cherry_pick",
+ url.QueryEscape(project), url.QueryEscape(sha))
+
+ req, err := s.client.NewRequest("POST", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var c *Commit
+ resp, err := s.client.Do(req, &c)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return c, resp, err
+}
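The commits.go changes add CreateCommit (a single commit built from several CommitAction entries) and CherryPickCommit. A rough sketch of both follows; it assumes NewClient(nil, token), the Commits service field, and a String pointer helper analogous to the Bool helper shown later in gitlab.go, none of which are part of this diff.

package main

import (
	"log"

	"github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "PRIVATE-TOKEN") // assumed constructor

	// One commit touching two files: create one, update another.
	opt := &gitlab.CreateCommitOptions{
		BranchName:    gitlab.String("master"),
		CommitMessage: gitlab.String("Add changelog, tweak readme"),
		Actions: []*gitlab.CommitAction{
			{Action: gitlab.FileCreate, FilePath: "CHANGELOG.md", Content: "## v0.1.0\n"},
			{Action: gitlab.FileUpdate, FilePath: "README.md", Content: "updated contents"},
		},
	}
	commit, _, err := git.Commits.CreateCommit("group/project", opt)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created commit %s", commit.ShortID)

	// CherryPickCommit follows the same shape: the SHA plus a target branch.
	_, _, err = git.Commits.CherryPickCommit("group/project", commit.ID,
		&gitlab.CherryPickCommitOptions{TargetBranch: gitlab.String("stable")})
	if err != nil {
		log.Fatal(err)
	}
}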
diff --git a/vendor/github.com/xanzy/go-gitlab/deploy_keys.go b/vendor/github.com/xanzy/go-gitlab/deploy_keys.go
index e964e18..00bcf0d 100644
--- a/vendor/github.com/xanzy/go-gitlab/deploy_keys.go
+++ b/vendor/github.com/xanzy/go-gitlab/deploy_keys.go
@@ -25,7 +25,8 @@ import (
// DeployKeysService handles communication with the keys related methods
// of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/deploy_keys.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/deploy_keys.md
type DeployKeysService struct {
client *Client
}
@@ -35,6 +36,7 @@ type DeployKey struct {
ID int `json:"id"`
Title string `json:"title"`
Key string `json:"key"`
+ CanPush *bool `json:"can_push"`
CreatedAt *time.Time `json:"created_at"`
}
@@ -45,7 +47,7 @@ func (k DeployKey) String() string {
// ListDeployKeys gets a list of a project's deploy keys
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/deploy_keys.html#list-deploy-keys
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/deploy_keys.md#list-deploy-keys
func (s *DeployKeysService) ListDeployKeys(pid interface{}, options ...OptionFunc) ([]*DeployKey, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -70,7 +72,7 @@ func (s *DeployKeysService) ListDeployKeys(pid interface{}, options ...OptionFun
// GetDeployKey gets a single deploy key.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/deploy_keys.html#single-deploy-key
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/deploy_keys.md#single-deploy-key
func (s *DeployKeysService) GetDeployKey(pid interface{}, deployKey int, options ...OptionFunc) (*DeployKey, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -95,10 +97,11 @@ func (s *DeployKeysService) GetDeployKey(pid interface{}, deployKey int, options
// AddDeployKeyOptions represents the available AddDeployKey() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/deploy_keys.html#add-deploy-key
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/deploy_keys.md#add-deploy-key
type AddDeployKeyOptions struct {
- Title *string `url:"title,omitempty" json:"title,omitempty"`
- Key *string `url:"key,omitempty" json:"key,omitempty"`
+ Title *string `url:"title,omitempty" json:"title,omitempty"`
+ Key *string `url:"key,omitempty" json:"key,omitempty"`
+ CanPush *bool `url:"can_push,omitempty" json:"can_push,omitempty"`
}
// AddDeployKey creates a new deploy key for a project. If deploy key already
@@ -106,7 +109,7 @@ type AddDeployKeyOptions struct {
// original one is accessible by the same user.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/deploy_keys.html#add-deploy-key
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/deploy_keys.md#add-deploy-key
func (s *DeployKeysService) AddDeployKey(pid interface{}, opt *AddDeployKeyOptions, options ...OptionFunc) (*DeployKey, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -131,7 +134,7 @@ func (s *DeployKeysService) AddDeployKey(pid interface{}, opt *AddDeployKeyOptio
// DeleteDeployKey deletes a deploy key from a project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/deploy_keys.html#delete-deploy-key
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/deploy_keys.md#delete-deploy-key
func (s *DeployKeysService) DeleteDeployKey(pid interface{}, deployKey int, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
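AddDeployKeyOptions gains a CanPush flag above, and DeployKey now carries it back. A hedged example of registering a push-capable deploy key, assuming NewClient(nil, token), the DeployKeys service field, and the String/Bool pointer helpers (only Bool appears in this diff):

package main

import (
	"log"

	"github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "PRIVATE-TOKEN") // assumed constructor

	// Register a deploy key that is also allowed to push to the repository.
	key, _, err := git.DeployKeys.AddDeployKey("group/project", &gitlab.AddDeployKeyOptions{
		Title:   gitlab.String("ci-push-key"),
		Key:     gitlab.String("ssh-rsa AAAA... ci@example.com"),
		CanPush: gitlab.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("added deploy key %d (%s)", key.ID, key.Title)
}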
diff --git a/vendor/github.com/xanzy/go-gitlab/events.go b/vendor/github.com/xanzy/go-gitlab/events.go
index ef33459..7ba02df 100644
--- a/vendor/github.com/xanzy/go-gitlab/events.go
+++ b/vendor/github.com/xanzy/go-gitlab/events.go
@@ -21,7 +21,7 @@ import "time"
// PushEvent represents a push event.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#push-events
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#push-events
type PushEvent struct {
ObjectKind string `json:"object_kind"`
Before string `json:"before"`
@@ -57,7 +57,7 @@ type PushEvent struct {
// TagEvent represents a tag event.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#tag-events
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#tag-events
type TagEvent struct {
ObjectKind string `json:"object_kind"`
Before string `json:"before"`
@@ -92,7 +92,7 @@ type TagEvent struct {
// IssueEvent represents an issue event.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#issues-events
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#issues-events
type IssueEvent struct {
ObjectKind string `json:"object_kind"`
User *User `json:"user"`
@@ -140,7 +140,7 @@ type IssueEvent struct {
// CommitCommentEvent represents a comment on a commit event.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#comment-on-commit
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#comment-on-commit
type CommitCommentEvent struct {
ObjectKind string `json:"object_kind"`
User *User `json:"user"`
@@ -192,7 +192,7 @@ type CommitCommentEvent struct {
// MergeCommentEvent represents a comment on a merge event.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#comment-on-merge-request
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#comment-on-merge-request
type MergeCommentEvent struct {
ObjectKind string `json:"object_kind"`
User *User `json:"user"`
@@ -236,7 +236,7 @@ type MergeCommentEvent struct {
// IssueCommentEvent represents a comment on an issue event.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#comment-on-issue
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#comment-on-issue
type IssueCommentEvent struct {
ObjectKind string `json:"object_kind"`
User *User `json:"user"`
@@ -280,7 +280,7 @@ type IssueCommentEvent struct {
// SnippetCommentEvent represents a comment on a snippet event.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#comment-on-code-snippet
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#comment-on-code-snippet
type SnippetCommentEvent struct {
ObjectKind string `json:"object_kind"`
User *User `json:"user"`
@@ -324,7 +324,7 @@ type SnippetCommentEvent struct {
// MergeEvent represents a merge event.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#merge-request-events
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#merge-request-events
type MergeEvent struct {
ObjectKind string `json:"object_kind"`
User *User `json:"user"`
@@ -407,7 +407,7 @@ type MergeEvent struct {
// WikiPageEvent represents a wiki page event.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#wiki-page-events
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#wiki-page-events
type WikiPageEvent struct {
ObjectKind string `json:"object_kind"`
User *User `json:"user"`
@@ -448,7 +448,7 @@ type WikiPageEvent struct {
// PipelineEvent represents a pipeline event.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#pipeline-events
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#pipeline-events
type PipelineEvent struct {
ObjectKind string `json:"object_kind"`
ObjectAttributes struct {
@@ -520,7 +520,7 @@ type PipelineEvent struct {
// BuildEvent represents a build event.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/web_hooks/web_hooks.html#build-events
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/web_hooks/web_hooks.md#build-events
type BuildEvent struct {
ObjectKind string `json:"object_kind"`
Ref string `json:"ref"`
diff --git a/vendor/github.com/xanzy/go-gitlab/gitlab.go b/vendor/github.com/xanzy/go-gitlab/gitlab.go
index 35d65a4..c2da4c3 100644
--- a/vendor/github.com/xanzy/go-gitlab/gitlab.go
+++ b/vendor/github.com/xanzy/go-gitlab/gitlab.go
@@ -18,6 +18,7 @@ package gitlab
import (
"bytes"
+ "context"
"encoding/json"
"fmt"
"io"
@@ -39,12 +40,14 @@ const (
// tokenType represents a token type within GitLab.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/
type tokenType int
// List of available token type
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/
const (
privateToken tokenType = iota
oAuthToken
@@ -52,12 +55,14 @@ const (
// AccessLevelValue represents a permission level within GitLab.
//
-// GitLab API docs: https://docs.gitlab.com/ce/permissions/permissions.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/permissions/permissions.md
type AccessLevelValue int
// List of available access levels
//
-// GitLab API docs: https://docs.gitlab.com/ce/permissions/permissions.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/permissions/permissions.md
const (
GuestPermissions AccessLevelValue = 10
ReporterPermissions AccessLevelValue = 20
@@ -128,12 +133,14 @@ var notificationLevelTypes = map[string]NotificationLevelValue{
// VisibilityLevelValue represents a visibility level within GitLab.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/
type VisibilityLevelValue int
// List of available visibility levels
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/
const (
PrivateVisibility VisibilityLevelValue = 0
InternalVisibility VisibilityLevelValue = 10
@@ -448,7 +455,7 @@ func parseID(id interface{}) (string, error) {
// An ErrorResponse reports one or more errors caused by an API request.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/README.html#data-validation-and-error-reporting
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/README.md#data-validation-and-error-reporting
type ErrorResponse struct {
Response *http.Response
Message string
@@ -517,7 +524,7 @@ func parseError(raw interface{}) string {
errs = append(errs, fmt.Sprintf("{%s: %s}", k, parseError(v)))
}
sort.Strings(errs)
- return fmt.Sprintf("%s", strings.Join(errs, ", "))
+ return strings.Join(errs, ", ")
default:
return fmt.Sprintf("failed to parse unexpected error type: %T", raw)
@@ -527,7 +534,7 @@ func parseError(raw interface{}) string {
// OptionFunc can be passed to all API requests to make the API call as if you were
// another user, provided your private token is from an administrator account.
//
-// GitLab docs: https://docs.gitlab.com/ce/api/README.html#sudo
+// GitLab docs: https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/README.md#sudo
type OptionFunc func(*http.Request) error
// WithSudo takes either a username or user ID and sets the SUDO request header
@@ -546,6 +553,14 @@ func WithSudo(uid interface{}) OptionFunc {
}
}
+// WithContext runs the request with the provided context
+func WithContext(ctx context.Context) OptionFunc {
+ return func(req *http.Request) error {
+ *req = *req.WithContext(ctx)
+ return nil
+ }
+}
+
// Bool is a helper routine that allocates a new bool value
// to store v and returns a pointer to it.
func Bool(v bool) *bool {
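The new WithContext OptionFunc lets any API call carry a context for cancellation or timeouts. A small sketch using GetCommit, whose signature appears earlier in this diff; the NewClient(nil, token) constructor and the Commits service field are assumptions drawn from the rest of the package, and the SHA is a placeholder.

package main

import (
	"context"
	"log"
	"time"

	"github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "PRIVATE-TOKEN") // assumed constructor

	// Cancel the underlying HTTP request if it takes longer than five seconds.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// WithContext is just another OptionFunc, so it can be appended to any call.
	commit, _, err := git.Commits.GetCommit("group/project", "a1b2c3d4", gitlab.WithContext(ctx))
	if err != nil {
		log.Fatal(err)
	}
	log.Println(commit.Title)
}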
diff --git a/vendor/github.com/xanzy/go-gitlab/groups.go b/vendor/github.com/xanzy/go-gitlab/groups.go
index 060079e..6a42dde 100644
--- a/vendor/github.com/xanzy/go-gitlab/groups.go
+++ b/vendor/github.com/xanzy/go-gitlab/groups.go
@@ -24,34 +24,39 @@ import (
// GroupsService handles communication with the group related methods of
// the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md
type GroupsService struct {
client *Client
}
// Group represents a GitLab group.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md
type Group struct {
- ID int `json:"id"`
- Name string `json:"name"`
- Path string `json:"path"`
- Description string `json:"description"`
- Projects *[]Project `json:"projects,omitempty"`
+ ID int `json:"id"`
+ Name string `json:"name"`
+ Path string `json:"path"`
+ Description string `json:"description"`
+ Projects []*Project `json:"projects"`
+ Statistics *StorageStatistics `json:"statistics"`
}
// ListGroupsOptions represents the available ListGroups() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#list-project-groups
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-project-groups
type ListGroupsOptions struct {
ListOptions
- Search *string `url:"search,omitempty" json:"search,omitempty"`
+ Search *string `url:"search,omitempty" json:"search,omitempty"`
+ Statistics *bool `url:"statistics,omitempty" json:"statistics,omitempty"`
}
// ListGroups gets a list of groups. (As user: my groups, as admin: all groups)
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/groups.html#list-project-groups
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-project-groups
func (s *GroupsService) ListGroups(opt *ListGroupsOptions, options ...OptionFunc) ([]*Group, *Response, error) {
req, err := s.client.NewRequest("GET", "groups", opt, options)
if err != nil {
@@ -69,7 +74,8 @@ func (s *GroupsService) ListGroups(opt *ListGroupsOptions, options ...OptionFunc
// GetGroup gets all details of a group.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#details-of-a-group
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#details-of-a-group
func (s *GroupsService) GetGroup(gid interface{}, options ...OptionFunc) (*Group, *Response, error) {
group, err := parseID(gid)
if err != nil {
@@ -93,7 +99,8 @@ func (s *GroupsService) GetGroup(gid interface{}, options ...OptionFunc) (*Group
// CreateGroupOptions represents the available CreateGroup() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#new-group
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#new-group
type CreateGroupOptions struct {
Name *string `url:"name,omitempty" json:"name,omitempty"`
Path *string `url:"path,omitempty" json:"path,omitempty"`
@@ -104,7 +111,8 @@ type CreateGroupOptions struct {
// CreateGroup creates a new project group. Available only for users who can
// create groups.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#new-group
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#new-group
func (s *GroupsService) CreateGroup(opt *CreateGroupOptions, options ...OptionFunc) (*Group, *Response, error) {
req, err := s.client.NewRequest("POST", "groups", opt, options)
if err != nil {
@@ -124,7 +132,7 @@ func (s *GroupsService) CreateGroup(opt *CreateGroupOptions, options ...OptionFu
// for admin.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/groups.html#transfer-project-to-group
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#transfer-project-to-group
func (s *GroupsService) TransferGroup(gid interface{}, project int, options ...OptionFunc) (*Group, *Response, error) {
group, err := parseID(gid)
if err != nil {
@@ -148,7 +156,8 @@ func (s *GroupsService) TransferGroup(gid interface{}, project int, options ...O
// DeleteGroup removes group with all projects inside.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#remove-group
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#remove-group
func (s *GroupsService) DeleteGroup(gid interface{}, options ...OptionFunc) (*Response, error) {
group, err := parseID(gid)
if err != nil {
@@ -166,7 +175,8 @@ func (s *GroupsService) DeleteGroup(gid interface{}, options ...OptionFunc) (*Re
// SearchGroup gets all groups that match your string in their name or path.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#search-for-group
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#search-for-group
func (s *GroupsService) SearchGroup(query string, options ...OptionFunc) ([]*Group, *Response, error) {
var q struct {
Search string `url:"search,omitempty" json:"search,omitempty"`
@@ -189,7 +199,8 @@ func (s *GroupsService) SearchGroup(query string, options ...OptionFunc) ([]*Gro
// GroupMember represents a GitLab group member.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md
type GroupMember struct {
ID int `json:"id"`
Username string `json:"username"`
@@ -204,7 +215,7 @@ type GroupMember struct {
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/groups.html#list-group-members
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-group-members
type ListGroupMembersOptions struct {
ListOptions
}
@@ -213,7 +224,7 @@ type ListGroupMembersOptions struct {
// user.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/groups.html#list-group-members
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-group-members
func (s *GroupsService) ListGroupMembers(gid interface{}, opt *ListGroupMembersOptions, options ...OptionFunc) ([]*GroupMember, *Response, error) {
group, err := parseID(gid)
if err != nil {
@@ -239,7 +250,7 @@ func (s *GroupsService) ListGroupMembers(gid interface{}, opt *ListGroupMembersO
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/groups.html#list-a-group-s-projects
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-a-group-s-projects
type ListGroupProjectsOptions struct {
ListOptions
}
@@ -247,7 +258,7 @@ type ListGroupProjectsOptions struct {
// ListGroupProjects gets a list of group projects.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/groups.html#list-a-group-s-projects
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-a-group-s-projects
func (s *GroupsService) ListGroupProjects(gid interface{}, opt *ListGroupProjectsOptions, options ...OptionFunc) ([]*Project, *Response, error) {
group, err := parseID(gid)
if err != nil {
@@ -271,7 +282,8 @@ func (s *GroupsService) ListGroupProjects(gid interface{}, opt *ListGroupProject
// AddGroupMemberOptions represents the available AddGroupMember() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/groups.html#add-group-member
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#add-group-member
type AddGroupMemberOptions struct {
UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"`
AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
@@ -280,7 +292,7 @@ type AddGroupMemberOptions struct {
// AddGroupMember adds a user to the list of group members.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/groups.html#list-group-members
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-group-members
func (s *GroupsService) AddGroupMember(gid interface{}, opt *AddGroupMemberOptions, options ...OptionFunc) (*GroupMember, *Response, error) {
group, err := parseID(gid)
if err != nil {
@@ -306,7 +318,7 @@ func (s *GroupsService) AddGroupMember(gid interface{}, opt *AddGroupMemberOptio
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/groups.html#edit-group-team-member
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#edit-group-team-member
type UpdateGroupMemberOptions struct {
AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
}
@@ -314,7 +326,7 @@ type UpdateGroupMemberOptions struct {
// UpdateGroupMember updates a group team member to a specified access level.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/groups.html#list-group-members
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#list-group-members
func (s *GroupsService) UpdateGroupMember(gid interface{}, user int, opt *UpdateGroupMemberOptions, options ...OptionFunc) (*GroupMember, *Response, error) {
group, err := parseID(gid)
if err != nil {
@@ -339,7 +351,7 @@ func (s *GroupsService) UpdateGroupMember(gid interface{}, user int, opt *Update
// RemoveGroupMember removes a user from the group team.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/groups.html#remove-user-from-user-team
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/groups.md#remove-user-from-user-team
func (s *GroupsService) RemoveGroupMember(gid interface{}, user int, options ...OptionFunc) (*Response, error) {
group, err := parseID(gid)
if err != nil {
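ListGroupsOptions now accepts a Statistics flag, and Group exposes its Projects and Statistics directly. A brief sketch, again assuming NewClient(nil, token), the Groups service field, and the Bool helper; whether statistics are actually returned depends on the permissions of the token used.

package main

import (
	"fmt"
	"log"

	"github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "PRIVATE-TOKEN") // assumed constructor

	// Ask GitLab to include storage statistics with each group, if the
	// authenticated user is allowed to see them.
	groups, _, err := git.Groups.ListGroups(&gitlab.ListGroupsOptions{
		Statistics: gitlab.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, g := range groups {
		fmt.Println(g.Name, len(g.Projects))
	}
}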
diff --git a/vendor/github.com/xanzy/go-gitlab/issues.go b/vendor/github.com/xanzy/go-gitlab/issues.go
index 0a286c2..62b2d8f 100644
--- a/vendor/github.com/xanzy/go-gitlab/issues.go
+++ b/vendor/github.com/xanzy/go-gitlab/issues.go
@@ -27,14 +27,16 @@ import (
// IssuesService handles communication with the issue related methods
// of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md
type IssuesService struct {
client *Client
}
// Issue represents a GitLab issue.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md
type Issue struct {
ID int `json:"id"`
IID int `json:"iid"`
@@ -83,7 +85,8 @@ func (l *Labels) MarshalJSON() ([]byte, error) {
// ListIssuesOptions represents the available ListIssues() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#list-issues
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#list-issues
type ListIssuesOptions struct {
ListOptions
State *string `url:"state,omitempty" json:"state,omitempty"`
@@ -95,7 +98,8 @@ type ListIssuesOptions struct {
// ListIssues gets all issues created by authenticated user. This function
// takes pagination parameters page and per_page to restrict the list of issues.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#list-issues
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#list-issues
func (s *IssuesService) ListIssues(opt *ListIssuesOptions, options ...OptionFunc) ([]*Issue, *Response, error) {
req, err := s.client.NewRequest("GET", "issues", opt, options)
if err != nil {
@@ -113,7 +117,8 @@ func (s *IssuesService) ListIssues(opt *ListIssuesOptions, options ...OptionFunc
// ListProjectIssuesOptions represents the available ListProjectIssues() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#list-issues
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#list-issues
type ListProjectIssuesOptions struct {
ListOptions
IID *int `url:"iid,omitempty" json:"iid,omitempty"`
@@ -127,7 +132,8 @@ type ListProjectIssuesOptions struct {
// ListProjectIssues gets a list of project issues. This function accepts
// pagination parameters page and per_page to return the list of project issues.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#list-project-issues
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#list-project-issues
func (s *IssuesService) ListProjectIssues(pid interface{}, opt *ListProjectIssuesOptions, options ...OptionFunc) ([]*Issue, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -151,7 +157,8 @@ func (s *IssuesService) ListProjectIssues(pid interface{}, opt *ListProjectIssue
// GetIssue gets a single project issue.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#single-issues
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#single-issues
func (s *IssuesService) GetIssue(pid interface{}, issue int, options ...OptionFunc) (*Issue, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -175,7 +182,8 @@ func (s *IssuesService) GetIssue(pid interface{}, issue int, options ...OptionFu
// CreateIssueOptions represents the available CreateIssue() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#new-issues
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#new-issues
type CreateIssueOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Description *string `url:"description,omitempty" json:"description,omitempty"`
@@ -186,7 +194,8 @@ type CreateIssueOptions struct {
// CreateIssue creates a new project issue.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#new-issues
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#new-issues
func (s *IssuesService) CreateIssue(pid interface{}, opt *CreateIssueOptions, options ...OptionFunc) (*Issue, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -210,7 +219,8 @@ func (s *IssuesService) CreateIssue(pid interface{}, opt *CreateIssueOptions, op
// UpdateIssueOptions represents the available UpdateIssue() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#edit-issues
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#edit-issues
type UpdateIssueOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Description *string `url:"description,omitempty" json:"description,omitempty"`
@@ -223,7 +233,8 @@ type UpdateIssueOptions struct {
// UpdateIssue updates an existing project issue. This function is also used
// to mark an issue as closed.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#edit-issues
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#edit-issues
func (s *IssuesService) UpdateIssue(pid interface{}, issue int, opt *UpdateIssueOptions, options ...OptionFunc) (*Issue, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -247,7 +258,8 @@ func (s *IssuesService) UpdateIssue(pid interface{}, issue int, opt *UpdateIssue
// DeleteIssue deletes a single project issue.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html#delete-an-issue
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#delete-an-issue
func (s *IssuesService) DeleteIssue(pid interface{}, issue int, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
diff --git a/vendor/github.com/xanzy/go-gitlab/labels.go b/vendor/github.com/xanzy/go-gitlab/labels.go
index 8d571b7..3c936ff 100644
--- a/vendor/github.com/xanzy/go-gitlab/labels.go
+++ b/vendor/github.com/xanzy/go-gitlab/labels.go
@@ -24,14 +24,16 @@ import (
// LabelsService handles communication with the label related methods
// of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md
type LabelsService struct {
client *Client
}
// Label represents a GitLab label.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md
type Label struct {
Name string `json:"name"`
Color string `json:"color"`
@@ -47,7 +49,8 @@ func (l Label) String() string {
// ListLabels gets all labels for given project.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html#list-labels
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md#list-labels
func (s *LabelsService) ListLabels(pid interface{}, options ...OptionFunc) ([]*Label, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -71,7 +74,8 @@ func (s *LabelsService) ListLabels(pid interface{}, options ...OptionFunc) ([]*L
// CreateLabelOptions represents the available CreateLabel() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html#create-a-new-label
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md#create-a-new-label
type CreateLabelOptions struct {
Name *string `url:"name,omitempty" json:"name,omitempty"`
Color *string `url:"color,omitempty" json:"color,omitempty"`
@@ -81,7 +85,8 @@ type CreateLabelOptions struct {
// CreateLabel creates a new label for given repository with given name and
// color.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html#create-a-new-label
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md#create-a-new-label
func (s *LabelsService) CreateLabel(pid interface{}, opt *CreateLabelOptions, options ...OptionFunc) (*Label, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -105,14 +110,16 @@ func (s *LabelsService) CreateLabel(pid interface{}, opt *CreateLabelOptions, op
// DeleteLabelOptions represents the available DeleteLabel() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html#delete-a-label
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md#delete-a-label
type DeleteLabelOptions struct {
Name *string `url:"name,omitempty" json:"name,omitempty"`
}
// DeleteLabel deletes a label given by its name.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html#delete-a-label
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md#delete-a-label
func (s *LabelsService) DeleteLabel(pid interface{}, opt *DeleteLabelOptions, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -130,7 +137,8 @@ func (s *LabelsService) DeleteLabel(pid interface{}, opt *DeleteLabelOptions, op
// UpdateLabelOptions represents the available UpdateLabel() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html#delete-a-label
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md#delete-a-label
type UpdateLabelOptions struct {
Name *string `url:"name,omitempty" json:"name,omitempty"`
NewName *string `url:"new_name,omitempty" json:"new_name,omitempty"`
@@ -141,7 +149,8 @@ type UpdateLabelOptions struct {
// UpdateLabel updates an existing label with a new name or a new color. At least
// one parameter is required to update the label.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/labels.html#edit-an-existing-label
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/labels.md#edit-an-existing-label
func (s *LabelsService) UpdateLabel(pid interface{}, opt *UpdateLabelOptions, options ...OptionFunc) (*Label, *Response, error) {
project, err := parseID(pid)
if err != nil {
diff --git a/vendor/github.com/xanzy/go-gitlab/merge_requests.go b/vendor/github.com/xanzy/go-gitlab/merge_requests.go
index 1d15406..34405d8 100644
--- a/vendor/github.com/xanzy/go-gitlab/merge_requests.go
+++ b/vendor/github.com/xanzy/go-gitlab/merge_requests.go
@@ -25,14 +25,16 @@ import (
// MergeRequestsService handles communication with the merge requests related
// methods of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/merge_requests.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md
type MergeRequestsService struct {
client *Client
}
// MergeRequest represents a GitLab merge request.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/merge_requests.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md
type MergeRequest struct {
ID int `json:"id"`
IID int `json:"iid"`
@@ -102,7 +104,7 @@ func (m MergeRequest) String() string {
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/merge_requests.html#list-merge-requests
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#list-merge-requests
type ListMergeRequestsOptions struct {
ListOptions
IID *int `url:"iid,omitempty" json:"iid,omitempty"`
@@ -117,7 +119,7 @@ type ListMergeRequestsOptions struct {
// per_page can be used to restrict the list of merge requests.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/merge_requests.html#list-merge-requests
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#list-merge-requests
func (s *MergeRequestsService) ListMergeRequests(pid interface{}, opt *ListMergeRequestsOptions, options ...OptionFunc) ([]*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -142,7 +144,7 @@ func (s *MergeRequestsService) ListMergeRequests(pid interface{}, opt *ListMerge
// GetMergeRequest shows information about a single merge request.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/merge_requests.html#get-single-mr
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#get-single-mr
func (s *MergeRequestsService) GetMergeRequest(pid interface{}, mergeRequest int, options ...OptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -167,7 +169,7 @@ func (s *MergeRequestsService) GetMergeRequest(pid interface{}, mergeRequest int
// GetMergeRequestCommits gets a list of merge request commits.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/merge_requests.html#get-single-mr-commits
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#get-single-mr-commits
func (s *MergeRequestsService) GetMergeRequestCommits(pid interface{}, mergeRequest int, options ...OptionFunc) ([]*Commit, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -193,7 +195,7 @@ func (s *MergeRequestsService) GetMergeRequestCommits(pid interface{}, mergeRequ
// its files and changes.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/merge_requests.html#get-single-mr-changes
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#get-single-mr-changes
func (s *MergeRequestsService) GetMergeRequestChanges(pid interface{}, mergeRequest int, options ...OptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -219,7 +221,7 @@ func (s *MergeRequestsService) GetMergeRequestChanges(pid interface{}, mergeRequ
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/merge_requests.html#create-mr
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#create-mr
type CreateMergeRequestOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Description *string `url:"description,omitempty" json:"description,omitempty"`
@@ -232,7 +234,7 @@ type CreateMergeRequestOptions struct {
// CreateMergeRequest creates a new merge request.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/merge_requests.html#create-mr
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#create-mr
func (s *MergeRequestsService) CreateMergeRequest(pid interface{}, opt *CreateMergeRequestOptions, options ...OptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -258,7 +260,7 @@ func (s *MergeRequestsService) CreateMergeRequest(pid interface{}, opt *CreateMe
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/merge_requests.html#update-mr
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#update-mr
type UpdateMergeRequestOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Description *string `url:"description,omitempty" json:"description,omitempty"`
@@ -270,7 +272,7 @@ type UpdateMergeRequestOptions struct {
// UpdateMergeRequest updates an existing merge request.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/merge_requests.html#update-mr
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#update-mr
func (s *MergeRequestsService) UpdateMergeRequest(pid interface{}, mergeRequest int, opt *UpdateMergeRequestOptions, options ...OptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -296,7 +298,7 @@ func (s *MergeRequestsService) UpdateMergeRequest(pid interface{}, mergeRequest
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/merge_requests.html#accept-mr
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#accept-mr
type AcceptMergeRequestOptions struct {
MergeCommitMessage *string `url:"merge_commit_message,omitempty" json:"merge_commit_message,omitempty"`
ShouldRemoveSourceBranch *bool `url:"should_remove_source_branch,omitempty" json:"should_remove_source_branch,omitempty"`
@@ -310,7 +312,7 @@ type AcceptMergeRequestOptions struct {
// already merged or closed - you get a 405 and the error message 'Method Not Allowed'
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/merge_requests.html#accept-mr
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/merge_requests.md#accept-mr
func (s *MergeRequestsService) AcceptMergeRequest(pid interface{}, mergeRequest int, opt *AcceptMergeRequestOptions, options ...OptionFunc) (*MergeRequest, *Response, error) {
project, err := parseID(pid)
if err != nil {
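A minimal usage sketch for the accept endpoint documented above, assuming the package is imported as gitlab from github.com/xanzy/go-gitlab, that git is an already-authenticated *gitlab.Client exposing this service as git.MergeRequests (the client wiring lives elsewhere in the package), and that the helper name acceptMergeRequest is purely illustrative:

// Sketch only: git is an authenticated *gitlab.Client.
func acceptMergeRequest(git *gitlab.Client, projectID, mergeRequestID int) error {
	message := "Merged via API"
	removeBranch := true
	opt := &gitlab.AcceptMergeRequestOptions{
		MergeCommitMessage:       &message,
		ShouldRemoveSourceBranch: &removeBranch,
	}
	// A merge request that is already merged or closed comes back as an error
	// (HTTP 405 Method Not Allowed), per the doc comment above.
	_, _, err := git.MergeRequests.AcceptMergeRequest(projectID, mergeRequestID, opt)
	return err
}

Because the pid argument is an interface{} resolved by parseID, either the numeric project ID or the "namespace/project" path string can be passed here.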
diff --git a/vendor/github.com/xanzy/go-gitlab/milestones.go b/vendor/github.com/xanzy/go-gitlab/milestones.go
index eeaf9cc..6c04d7b 100644
--- a/vendor/github.com/xanzy/go-gitlab/milestones.go
+++ b/vendor/github.com/xanzy/go-gitlab/milestones.go
@@ -25,14 +25,16 @@ import (
// MilestonesService handles communication with the milestone related methods
// of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/milestones.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md
type MilestonesService struct {
client *Client
}
// Milestone represents a GitLab milestone.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/milestones.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md
type Milestone struct {
ID int `json:"id"`
Iid int `json:"iid"`
@@ -53,7 +55,7 @@ func (m Milestone) String() string {
// ListMilestonesOptions represents the available ListMilestones() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/milestones.html#list-project-milestones
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#list-project-milestones
type ListMilestonesOptions struct {
ListOptions
IID *int `url:"iid,omitempty" json:"iid,omitempty"`
@@ -62,7 +64,7 @@ type ListMilestonesOptions struct {
// ListMilestones returns a list of project milestones.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/milestones.html#list-project-milestones
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#list-project-milestones
func (s *MilestonesService) ListMilestones(pid interface{}, opt *ListMilestonesOptions, options ...OptionFunc) ([]*Milestone, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -87,7 +89,7 @@ func (s *MilestonesService) ListMilestones(pid interface{}, opt *ListMilestonesO
// GetMilestone gets a single project milestone.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/milestones.html#get-single-milestone
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#get-single-milestone
func (s *MilestonesService) GetMilestone(pid interface{}, milestone int, options ...OptionFunc) (*Milestone, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -112,7 +114,7 @@ func (s *MilestonesService) GetMilestone(pid interface{}, milestone int, options
// CreateMilestoneOptions represents the available CreateMilestone() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/milestones.html#create-new-milestone
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#create-new-milestone
type CreateMilestoneOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Description *string `url:"description,omitempty" json:"description,omitempty"`
@@ -123,7 +125,7 @@ type CreateMilestoneOptions struct {
// CreateMilestone creates a new project milestone.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/milestones.html#create-new-milestone
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#create-new-milestone
func (s *MilestonesService) CreateMilestone(pid interface{}, opt *CreateMilestoneOptions, options ...OptionFunc) (*Milestone, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -148,7 +150,7 @@ func (s *MilestonesService) CreateMilestone(pid interface{}, opt *CreateMileston
// UpdateMilestoneOptions represents the available UpdateMilestone() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/milestones.html#edit-milestone
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#edit-milestone
type UpdateMilestoneOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Description *string `url:"description,omitempty" json:"description,omitempty"`
@@ -160,7 +162,7 @@ type UpdateMilestoneOptions struct {
// UpdateMilestone updates an existing project milestone.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/milestones.html#edit-milestone
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#edit-milestone
func (s *MilestonesService) UpdateMilestone(pid interface{}, milestone int, opt *UpdateMilestoneOptions, options ...OptionFunc) (*Milestone, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -185,7 +187,7 @@ func (s *MilestonesService) UpdateMilestone(pid interface{}, milestone int, opt
// GetMilestoneIssuesOptions represents the available GetMilestoneIssues() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/milestones.html#get-all-issues-assigned-to-a-single-milestone
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#get-all-issues-assigned-to-a-single-milestone
type GetMilestoneIssuesOptions struct {
ListOptions
}
@@ -193,7 +195,7 @@ type GetMilestoneIssuesOptions struct {
// GetMilestoneIssues gets all issues assigned to a single project milestone.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/milestones.html#get-all-issues-assigned-to-a-single-milestone
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/milestones.md#get-all-issues-assigned-to-a-single-milestone
func (s *MilestonesService) GetMilestoneIssues(pid interface{}, milestone int, opt *GetMilestoneIssuesOptions, options ...OptionFunc) ([]*Issue, *Response, error) {
project, err := parseID(pid)
if err != nil {
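In the same spirit, a sketch of creating a milestone with the options shown in this file; git and the createMilestone name are illustrative as before:

// Sketch only: git is an authenticated *gitlab.Client.
func createMilestone(git *gitlab.Client, projectID int) (*gitlab.Milestone, error) {
	title := "v1.0"
	description := "First stable release"
	opt := &gitlab.CreateMilestoneOptions{
		Title:       &title,
		Description: &description,
	}
	m, _, err := git.Milestones.CreateMilestone(projectID, opt)
	return m, err
}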
diff --git a/vendor/github.com/xanzy/go-gitlab/namespaces.go b/vendor/github.com/xanzy/go-gitlab/namespaces.go
index d4b5e45..090f61b 100644
--- a/vendor/github.com/xanzy/go-gitlab/namespaces.go
+++ b/vendor/github.com/xanzy/go-gitlab/namespaces.go
@@ -19,14 +19,16 @@ package gitlab
// NamespacesService handles communication with the namespace related methods
// of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/namespaces.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/namespaces.md
type NamespacesService struct {
client *Client
}
// Namespace represents a GitLab namespace.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/namespaces.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/namespaces.md
type Namespace struct {
ID int `json:"id"`
Path string `json:"path"`
@@ -39,7 +41,8 @@ func (n Namespace) String() string {
// ListNamespacesOptions represents the available ListNamespaces() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/namespaces.html#list-namespaces
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/namespaces.md#list-namespaces
type ListNamespacesOptions struct {
ListOptions
Search *string `url:"search,omitempty" json:"search,omitempty"`
@@ -47,7 +50,8 @@ type ListNamespacesOptions struct {
// ListNamespaces gets a list of namespaces accessible by the authenticated user.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/namespaces.html#list-namespaces
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/namespaces.md#list-namespaces
func (s *NamespacesService) ListNamespaces(opt *ListNamespacesOptions, options ...OptionFunc) ([]*Namespace, *Response, error) {
req, err := s.client.NewRequest("GET", "namespaces", opt, options)
if err != nil {
@@ -67,7 +71,7 @@ func (s *NamespacesService) ListNamespaces(opt *ListNamespacesOptions, options .
// or path.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/namespaces.html#search-for-namespace
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/namespaces.md#search-for-namespace
func (s *NamespacesService) SearchNamespace(query string, options ...OptionFunc) ([]*Namespace, *Response, error) {
var q struct {
Search string `url:"search,omitempty" json:"search,omitempty"`
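A short sketch of the namespace search endpoint above; it assumes fmt from the standard library and, as before, an authenticated *gitlab.Client named git:

// Sketch only: requires "fmt" and an authenticated *gitlab.Client.
func findNamespaces(git *gitlab.Client, query string) error {
	namespaces, _, err := git.Namespaces.SearchNamespace(query)
	if err != nil {
		return err
	}
	for _, n := range namespaces {
		// ID and Path are fields of the Namespace struct defined above.
		fmt.Printf("%d: %s\n", n.ID, n.Path)
	}
	return nil
}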
diff --git a/vendor/github.com/xanzy/go-gitlab/notes.go b/vendor/github.com/xanzy/go-gitlab/notes.go
index c1836c2..2f11df4 100644
--- a/vendor/github.com/xanzy/go-gitlab/notes.go
+++ b/vendor/github.com/xanzy/go-gitlab/notes.go
@@ -25,14 +25,16 @@ import (
// NotesService handles communication with the notes related methods
// of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/notes.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md
type NotesService struct {
client *Client
}
// Note represents a GitLab note.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/notes.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md
type Note struct {
ID int `json:"id"`
Body string `json:"body"`
@@ -59,7 +61,7 @@ func (n Note) String() string {
// ListIssueNotesOptions represents the available ListIssueNotes() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#list-project-issue-notes
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#list-project-issue-notes
type ListIssueNotesOptions struct {
ListOptions
}
@@ -67,7 +69,7 @@ type ListIssueNotesOptions struct {
// ListIssueNotes gets a list of all notes for a single issue.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#list-project-issue-notes
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#list-project-issue-notes
func (s *NotesService) ListIssueNotes(pid interface{}, issue int, opt *ListIssueNotesOptions, options ...OptionFunc) ([]*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -92,7 +94,7 @@ func (s *NotesService) ListIssueNotes(pid interface{}, issue int, opt *ListIssue
// GetIssueNote returns a single note for a specific project issue.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#get-single-issue-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#get-single-issue-note
func (s *NotesService) GetIssueNote(pid interface{}, issue int, note int, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -118,7 +120,7 @@ func (s *NotesService) GetIssueNote(pid interface{}, issue int, note int, option
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#create-new-issue-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#create-new-issue-note
type CreateIssueNoteOptions struct {
Body *string `url:"body,omitempty" json:"body,omitempty"`
}
@@ -126,7 +128,7 @@ type CreateIssueNoteOptions struct {
// CreateIssueNote creates a new note for a single project issue.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#create-new-issue-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#create-new-issue-note
func (s *NotesService) CreateIssueNote(pid interface{}, issue int, opt *CreateIssueNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -152,14 +154,14 @@ func (s *NotesService) CreateIssueNote(pid interface{}, issue int, opt *CreateIs
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#modify-existing-issue-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#modify-existing-issue-note
type UpdateIssueNoteOptions struct {
Body *string `url:"body,omitempty" json:"body,omitempty"`
}
// UpdateIssueNote modifies an existing note of an issue.
//
-// https://docs.gitlab.com/ce/api/notes.html#modify-existing-issue-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#modify-existing-issue-note
func (s *NotesService) UpdateIssueNote(pid interface{}, issue int, note int, opt *UpdateIssueNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -185,7 +187,7 @@ func (s *NotesService) UpdateIssueNote(pid interface{}, issue int, note int, opt
// notes are comments users can post to a snippet.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#list-all-snippet-notes
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#list-all-snippet-notes
func (s *NotesService) ListSnippetNotes(pid interface{}, snippet int, options ...OptionFunc) ([]*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -210,7 +212,7 @@ func (s *NotesService) ListSnippetNotes(pid interface{}, snippet int, options ..
// GetSnippetNote returns a single note for a given snippet.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#get-single-snippet-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#get-single-snippet-note
func (s *NotesService) GetSnippetNote(pid interface{}, snippet int, note int, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -236,7 +238,7 @@ func (s *NotesService) GetSnippetNote(pid interface{}, snippet int, note int, op
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#create-new-snippet-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#create-new-snippet-note
type CreateSnippetNoteOptions struct {
Body *string `url:"body,omitempty" json:"body,omitempty"`
}
@@ -245,7 +247,7 @@ type CreateSnippetNoteOptions struct {
// comments users can post to a snippet.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#create-new-snippet-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#create-new-snippet-note
func (s *NotesService) CreateSnippetNote(pid interface{}, snippet int, opt *CreateSnippetNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -271,14 +273,14 @@ func (s *NotesService) CreateSnippetNote(pid interface{}, snippet int, opt *Crea
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#modify-existing-snippet-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#modify-existing-snippet-note
type UpdateSnippetNoteOptions struct {
Body *string `url:"body,omitempty" json:"body,omitempty"`
}
// UpdateSnippetNote modifies an existing note of a snippet.
//
-// https://docs.gitlab.com/ce/api/notes.html#modify-existing-snippet-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#modify-existing-snippet-note
func (s *NotesService) UpdateSnippetNote(pid interface{}, snippet int, note int, opt *UpdateSnippetNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -303,7 +305,7 @@ func (s *NotesService) UpdateSnippetNote(pid interface{}, snippet int, note int,
// ListMergeRequestNotes gets a list of all notes for a single merge request.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#list-all-merge-request-notes
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#list-all-merge-request-notes
func (s *NotesService) ListMergeRequestNotes(pid interface{}, mergeRequest int, options ...OptionFunc) ([]*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -328,7 +330,7 @@ func (s *NotesService) ListMergeRequestNotes(pid interface{}, mergeRequest int,
// GetMergeRequestNote returns a single note for a given merge request.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#get-single-merge-request-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#get-single-merge-request-note
func (s *NotesService) GetMergeRequestNote(pid interface{}, mergeRequest int, note int, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -354,7 +356,7 @@ func (s *NotesService) GetMergeRequestNote(pid interface{}, mergeRequest int, no
// CreateMergeRequestNote() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#create-new-merge-request-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#create-new-merge-request-note
type CreateMergeRequestNoteOptions struct {
Body *string `url:"body,omitempty" json:"body,omitempty"`
}
@@ -362,7 +364,7 @@ type CreateMergeRequestNoteOptions struct {
// CreateMergeRequestNote creates a new note for a single merge request.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#create-new-merge-request-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#create-new-merge-request-note
func (s *NotesService) CreateMergeRequestNote(pid interface{}, mergeRequest int, opt *CreateMergeRequestNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -388,14 +390,14 @@ func (s *NotesService) CreateMergeRequestNote(pid interface{}, mergeRequest int,
// UpdateMergeRequestNote() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notes.html#modify-existing-merge-request-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#modify-existing-merge-request-note
type UpdateMergeRequestNoteOptions struct {
Body *string `url:"body,omitempty" json:"body,omitempty"`
}
// UpdateMergeRequestNote modifies an existing note of a merge request.
//
-// https://docs.gitlab.com/ce/api/notes.html#modify-existing-merge-request-note
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notes.md#modify-existing-merge-request-note
func (s *NotesService) UpdateMergeRequestNote(pid interface{}, mergeRequest int, note int, opt *UpdateMergeRequestNoteOptions, options ...OptionFunc) (*Note, *Response, error) {
project, err := parseID(pid)
if err != nil {
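The note endpoints all follow the same pattern; a sketch of posting a note on a merge request, with git and commentOnMergeRequest again being illustrative names:

// Sketch only: git is an authenticated *gitlab.Client.
func commentOnMergeRequest(git *gitlab.Client, projectID, mergeRequestID int, body string) error {
	opt := &gitlab.CreateMergeRequestNoteOptions{Body: &body}
	_, _, err := git.Notes.CreateMergeRequestNote(projectID, mergeRequestID, opt)
	return err
}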
diff --git a/vendor/github.com/xanzy/go-gitlab/notifications.go b/vendor/github.com/xanzy/go-gitlab/notifications.go
index 04424c2..544409c 100644
--- a/vendor/github.com/xanzy/go-gitlab/notifications.go
+++ b/vendor/github.com/xanzy/go-gitlab/notifications.go
@@ -9,7 +9,8 @@ import (
// NotificationSettingsService handles communication with the notification settings
// related methods of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/notification_settings.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md
type NotificationSettingsService struct {
client *Client
}
@@ -17,7 +18,7 @@ type NotificationSettingsService struct {
// NotificationSettings represents the GitLab notification settings.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notification_settings.html#notification-settings
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#notification-settings
type NotificationSettings struct {
Level NotificationLevelValue `json:"level"`
NotificationEmail string `json:"notification_email"`
@@ -27,7 +28,7 @@ type NotificationSettings struct {
// NotificationEvents represents the available notification setting events.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notification_settings.html#notification-settings
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#notification-settings
type NotificationEvents struct {
CloseIssue bool `json:"close_issue"`
CloseMergeRequest bool `json:"close_merge_request"`
@@ -50,7 +51,7 @@ func (ns NotificationSettings) String() string {
// GetGlobalSettings returns current notification settings and email address.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notification_settings.html#global-notification-settings
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#global-notification-settings
func (s *NotificationSettingsService) GetGlobalSettings(options ...OptionFunc) (*NotificationSettings, *Response, error) {
u := "notification_settings"
@@ -90,7 +91,7 @@ type NotificationSettingsOptions struct {
// UpdateGlobalSettings updates current notification settings and email address.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notification_settings.html#update-global-notification-settings
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#update-global-notification-settings
func (s *NotificationSettingsService) UpdateGlobalSettings(opt *NotificationSettingsOptions, options ...OptionFunc) (*NotificationSettings, *Response, error) {
if opt.Level != nil && *opt.Level == GlobalNotificationLevel {
return nil, nil, errors.New(
@@ -116,7 +117,7 @@ func (s *NotificationSettingsService) UpdateGlobalSettings(opt *NotificationSett
// GetSettingsForGroup returns current group notification settings.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notification_settings.html#group-project-level-notification-settings
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#group-project-level-notification-settings
func (s *NotificationSettingsService) GetSettingsForGroup(gid interface{}, options ...OptionFunc) (*NotificationSettings, *Response, error) {
group, err := parseID(gid)
if err != nil {
@@ -141,7 +142,7 @@ func (s *NotificationSettingsService) GetSettingsForGroup(gid interface{}, optio
// GetSettingsForProject returns current project notification settings.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notification_settings.html#group-project-level-notification-settings
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#group-project-level-notification-settings
func (s *NotificationSettingsService) GetSettingsForProject(pid interface{}, options ...OptionFunc) (*NotificationSettings, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -166,7 +167,7 @@ func (s *NotificationSettingsService) GetSettingsForProject(pid interface{}, opt
// UpdateSettingsForGroup updates current group notification settings.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notification_settings.html#update-group-project-level-notification-settings
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#update-group-project-level-notification-settings
func (s *NotificationSettingsService) UpdateSettingsForGroup(gid interface{}, opt *NotificationSettingsOptions, options ...OptionFunc) (*NotificationSettings, *Response, error) {
group, err := parseID(gid)
if err != nil {
@@ -191,7 +192,7 @@ func (s *NotificationSettingsService) UpdateSettingsForGroup(gid interface{}, op
// UpdateSettingsForProject updates current project notification settings.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/notification_settings.html#update-group-project-level-notification-settings
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/notification_settings.md#update-group-project-level-notification-settings
func (s *NotificationSettingsService) UpdateSettingsForProject(pid interface{}, opt *NotificationSettingsOptions, options ...OptionFunc) (*NotificationSettings, *Response, error) {
project, err := parseID(pid)
if err != nil {
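A sketch of reading the per-project notification settings shown above; fmt is assumed to be imported, and git is the usual illustrative client:

// Sketch only: requires "fmt" and an authenticated *gitlab.Client.
func showProjectNotifications(git *gitlab.Client, projectID int) error {
	settings, _, err := git.NotificationSettings.GetSettingsForProject(projectID)
	if err != nil {
		return err
	}
	// Level and NotificationEmail are the NotificationSettings fields above.
	fmt.Printf("level=%v email=%s\n", settings.Level, settings.NotificationEmail)
	return nil
}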
diff --git a/vendor/github.com/xanzy/go-gitlab/pipelines.go b/vendor/github.com/xanzy/go-gitlab/pipelines.go
index 4ade3fc..6faceb9 100644
--- a/vendor/github.com/xanzy/go-gitlab/pipelines.go
+++ b/vendor/github.com/xanzy/go-gitlab/pipelines.go
@@ -25,14 +25,16 @@ import (
// PipelinesService handles communication with the repositories related
// methods of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md
type PipelinesService struct {
client *Client
}
// Pipeline represents a GitLab pipeline.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md
type Pipeline struct {
ID int `json:"id"`
Status string `json:"status"`
@@ -64,7 +66,8 @@ func (i Pipeline) String() string {
// ListProjectPipelines gets a list of project pipelines.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html#list-project-pipelines
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md#list-project-pipelines
func (s *PipelinesService) ListProjectPipelines(pid interface{}, options ...OptionFunc) ([]*Pipeline, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -87,7 +90,8 @@ func (s *PipelinesService) ListProjectPipelines(pid interface{}, options ...Opti
// GetPipeline gets a single project pipeline.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html#get-a-single-pipeline
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md#get-a-single-pipeline
func (s *PipelinesService) GetPipeline(pid interface{}, pipeline int, options ...OptionFunc) (*Pipeline, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -111,14 +115,16 @@ func (s *PipelinesService) GetPipeline(pid interface{}, pipeline int, options ..
// CreatePipelineOptions represents the available CreatePipeline() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html#create-a-new-pipeline
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md#create-a-new-pipeline
type CreatePipelineOptions struct {
Ref *string `url:"ref,omitempty" json:"ref"`
}
// CreatePipeline creates a new project pipeline.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/pipelines.html#create-a-new-pipeline
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md#create-a-new-pipeline
func (s *PipelinesService) CreatePipeline(pid interface{}, opt *CreatePipelineOptions, options ...OptionFunc) (*Pipeline, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -143,7 +149,7 @@ func (s *PipelinesService) CreatePipeline(pid interface{}, opt *CreatePipelineOp
// RetryPipelineBuild retries failed builds in a pipeline
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/pipelines.html#retry-failed-builds-in-a-pipeline
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md#retry-failed-builds-in-a-pipeline
func (s *PipelinesService) RetryPipelineBuild(pid interface{}, pipelineID int, options ...OptionFunc) (*Pipeline, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -168,7 +174,7 @@ func (s *PipelinesService) RetryPipelineBuild(pid interface{}, pipelineID int, o
// CancelPipelineBuild cancels a pipeline's builds
//
// GitLab API docs:
-//https://docs.gitlab.com/ce/api/pipelines.html#cancel-a-pipelines-builds
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/pipelines.md#cancel-a-pipelines-builds
func (s *PipelinesService) CancelPipelineBuild(pid interface{}, pipelineID int, options ...OptionFunc) (*Pipeline, *Response, error) {
project, err := parseID(pid)
if err != nil {
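A sketch of starting a pipeline for a given ref via the CreatePipeline endpoint above, with the same assumptions about imports and the git client:

// Sketch only: requires "fmt" and an authenticated *gitlab.Client.
func triggerPipeline(git *gitlab.Client, projectID int, ref string) error {
	opt := &gitlab.CreatePipelineOptions{Ref: &ref}
	pipeline, _, err := git.Pipelines.CreatePipeline(projectID, opt)
	if err != nil {
		return err
	}
	fmt.Printf("pipeline %d is %s\n", pipeline.ID, pipeline.Status)
	return nil
}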
diff --git a/vendor/github.com/xanzy/go-gitlab/project_snippets.go b/vendor/github.com/xanzy/go-gitlab/project_snippets.go
index 65dab67..e58a6a8 100644
--- a/vendor/github.com/xanzy/go-gitlab/project_snippets.go
+++ b/vendor/github.com/xanzy/go-gitlab/project_snippets.go
@@ -26,14 +26,16 @@ import (
// ProjectSnippetsService handles communication with the project snippets
// related methods of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/project_snippets.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md
type ProjectSnippetsService struct {
client *Client
}
// Snippet represents a GitLab project snippet.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/project_snippets.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md
type Snippet struct {
ID int `json:"id"`
Title string `json:"title"`
@@ -57,14 +59,16 @@ func (s Snippet) String() string {
// ListSnippetsOptions represents the available ListSnippets() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/project_snippets.html#list-snippets
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#list-snippets
type ListSnippetsOptions struct {
ListOptions
}
// ListSnippets gets a list of project snippets.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/project_snippets.html#list-snippets
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#list-snippets
func (s *ProjectSnippetsService) ListSnippets(pid interface{}, opt *ListSnippetsOptions, options ...OptionFunc) ([]*Snippet, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -89,7 +93,7 @@ func (s *ProjectSnippetsService) ListSnippets(pid interface{}, opt *ListSnippets
// GetSnippet gets a single project snippet
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/project_snippets.html#single-snippet
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#single-snippet
func (s *ProjectSnippetsService) GetSnippet(pid interface{}, snippet int, options ...OptionFunc) (*Snippet, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -114,7 +118,7 @@ func (s *ProjectSnippetsService) GetSnippet(pid interface{}, snippet int, option
// CreateSnippetOptions represents the available CreateSnippet() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/project_snippets.html#create-new-snippet
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#create-new-snippet
type CreateSnippetOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
FileName *string `url:"file_name,omitempty" json:"file_name,omitempty"`
@@ -126,7 +130,7 @@ type CreateSnippetOptions struct {
// to create new snippets.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/project_snippets.html#create-new-snippet
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#create-new-snippet
func (s *ProjectSnippetsService) CreateSnippet(pid interface{}, opt *CreateSnippetOptions, options ...OptionFunc) (*Snippet, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -151,7 +155,7 @@ func (s *ProjectSnippetsService) CreateSnippet(pid interface{}, opt *CreateSnipp
// UpdateSnippetOptions represents the available UpdateSnippet() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/project_snippets.html#update-snippet
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#update-snippet
type UpdateSnippetOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
FileName *string `url:"file_name,omitempty" json:"file_name,omitempty"`
@@ -163,7 +167,7 @@ type UpdateSnippetOptions struct {
// permission to change an existing snippet.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/project_snippets.html#update-snippet
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#update-snippet
func (s *ProjectSnippetsService) UpdateSnippet(pid interface{}, snippet int, opt *UpdateSnippetOptions, options ...OptionFunc) (*Snippet, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -190,7 +194,7 @@ func (s *ProjectSnippetsService) UpdateSnippet(pid interface{}, snippet int, opt
// code.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/project_snippets.html#delete-snippet
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#delete-snippet
func (s *ProjectSnippetsService) DeleteSnippet(pid interface{}, snippet int, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -209,7 +213,7 @@ func (s *ProjectSnippetsService) DeleteSnippet(pid interface{}, snippet int, opt
// SnippetContent returns the raw project snippet as plain text.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/project_snippets.html#snippet-content
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/project_snippets.md#snippet-content
func (s *ProjectSnippetsService) SnippetContent(pid interface{}, snippet int, options ...OptionFunc) ([]byte, *Response, error) {
project, err := parseID(pid)
if err != nil {
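A sketch combining ListSnippets and SnippetContent from this file to dump each snippet's raw body; names other than the service methods are illustrative:

// Sketch only: requires "fmt" and an authenticated *gitlab.Client.
func dumpSnippets(git *gitlab.Client, projectID int) error {
	snippets, _, err := git.ProjectSnippets.ListSnippets(projectID, &gitlab.ListSnippetsOptions{})
	if err != nil {
		return err
	}
	for _, s := range snippets {
		content, _, err := git.ProjectSnippets.SnippetContent(projectID, s.ID)
		if err != nil {
			return err
		}
		fmt.Printf("%s (%d bytes)\n", s.Title, len(content))
	}
	return nil
}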
diff --git a/vendor/github.com/xanzy/go-gitlab/projects.go b/vendor/github.com/xanzy/go-gitlab/projects.go
index 86cd0d3..275cacd 100644
--- a/vendor/github.com/xanzy/go-gitlab/projects.go
+++ b/vendor/github.com/xanzy/go-gitlab/projects.go
@@ -25,14 +25,16 @@ import (
// ProjectsService handles communication with the repositories related methods
// of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md
type ProjectsService struct {
client *Client
}
// Project represents a GitLab project.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md
type Project struct {
ID int `json:"id"`
Description string `json:"description"`
@@ -77,6 +79,7 @@ type Project struct {
GroupName string `json:"group_name"`
GroupAccessLevel int `json:"group_access_level"`
} `json:"shared_with_groups"`
+ Statistics *ProjectStatistics `json:"statistics"`
}
// Repository represents a repository.
@@ -108,6 +111,20 @@ type ProjectNamespace struct {
UpdatedAt *time.Time `json:"updated_at"`
}
+// StorageStatistics represents a statistics record for a group or project.
+type StorageStatistics struct {
+ StorageSize int64 `json:"storage_size"`
+ RepositorySize int64 `json:"repository_size"`
+ LfsObjectsSize int64 `json:"lfs_objects_size"`
+ BuildArtifactsSize int64 `json:"build_artifacts_size"`
+}
+
+// ProjectStatistics represents a statistics record for a project.
+type ProjectStatistics struct {
+ StorageStatistics
+ CommitCount int `json:"commit_count"`
+}
+
// Permissions represents permissions.
type Permissions struct {
ProjectAccess *ProjectAccess `json:"project_access"`
@@ -132,7 +149,8 @@ func (s Project) String() string {
// ListProjectsOptions represents the available ListProjects() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#list-projects
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-projects
type ListProjectsOptions struct {
ListOptions
Archived *bool `url:"archived,omitempty" json:"archived,omitempty"`
@@ -141,11 +159,13 @@ type ListProjectsOptions struct {
Search *string `url:"search,omitempty" json:"search,omitempty"`
Simple *bool `url:"simple,omitempty" json:"simple,omitempty"`
Visibility *string `url:"visibility,omitempty" json:"visibility,omitempty"`
+ Statistics *bool `url:"statistics,omitempty" json:"statistics,omitempty"`
}
// ListProjects gets a list of projects accessible by the authenticated user.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#list-projects
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-projects
func (s *ProjectsService) ListProjects(opt *ListProjectsOptions, options ...OptionFunc) ([]*Project, *Response, error) {
req, err := s.client.NewRequest("GET", "projects", opt, options)
if err != nil {
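The new Statistics option and the ProjectStatistics field added in this file work together: asking for statistics on the list call makes GitLab populate per-project size counters. A sketch, under the usual assumptions about git and fmt:

// Sketch only: requires "fmt" and an authenticated *gitlab.Client.
func listProjectSizes(git *gitlab.Client) error {
	withStats := true
	opt := &gitlab.ListProjectsOptions{Statistics: &withStats}
	projects, _, err := git.Projects.ListProjects(opt)
	if err != nil {
		return err
	}
	for _, p := range projects {
		if p.Statistics == nil {
			continue // statistics were not returned for this project
		}
		// CommitCount is declared on ProjectStatistics; StorageSize is promoted
		// from the embedded StorageStatistics struct.
		fmt.Printf("%d: %d commits, %d bytes\n", p.ID, p.Statistics.CommitCount, p.Statistics.StorageSize)
	}
	return nil
}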
@@ -165,7 +185,7 @@ func (s *ProjectsService) ListProjects(opt *ListProjectsOptions, options ...Opti
// authenticated user.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#list-owned-projects
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-owned-projects
func (s *ProjectsService) ListOwnedProjects(opt *ListProjectsOptions, options ...OptionFunc) ([]*Project, *Response, error) {
req, err := s.client.NewRequest("GET", "projects/owned", opt, options)
if err != nil {
@@ -185,7 +205,7 @@ func (s *ProjectsService) ListOwnedProjects(opt *ListProjectsOptions, options ..
// authenticated user.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#list-starred-projects
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-starred-projects
func (s *ProjectsService) ListStarredProjects(opt *ListProjectsOptions, options ...OptionFunc) ([]*Project, *Response, error) {
req, err := s.client.NewRequest("GET", "projects/starred", opt, options)
if err != nil {
@@ -204,7 +224,7 @@ func (s *ProjectsService) ListStarredProjects(opt *ListProjectsOptions, options
// ListAllProjects gets a list of all GitLab projects (admin only).
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#list-all-projects
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-all-projects
func (s *ProjectsService) ListAllProjects(opt *ListProjectsOptions, options ...OptionFunc) ([]*Project, *Response, error) {
req, err := s.client.NewRequest("GET", "projects/all", opt, options)
if err != nil {
@@ -224,7 +244,7 @@ func (s *ProjectsService) ListAllProjects(opt *ListProjectsOptions, options ...O
// NAMESPACE/PROJECT_NAME, which is owned by the authenticated user.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#get-single-project
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#get-single-project
func (s *ProjectsService) GetProject(pid interface{}, options ...OptionFunc) (*Project, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -249,7 +269,7 @@ func (s *ProjectsService) GetProject(pid interface{}, options ...OptionFunc) (*P
// SearchProjectsOptions represents the available SearchProjects() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#search-for-projects-by-name
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#search-for-projects-by-name
type SearchProjectsOptions struct {
ListOptions
OrderBy *string `url:"order_by,omitempty" json:"order_by,omitempty"`
@@ -260,7 +280,7 @@ type SearchProjectsOptions struct {
// authenticated user.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#search-for-projects-by-name
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#search-for-projects-by-name
func (s *ProjectsService) SearchProjects(query string, opt *SearchProjectsOptions, options ...OptionFunc) ([]*Project, *Response, error) {
u := fmt.Sprintf("projects/search/%s", query)
@@ -281,7 +301,7 @@ func (s *ProjectsService) SearchProjects(query string, opt *SearchProjectsOption
// ProjectEvent represents a GitLab project event.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#get-project-events
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#get-project-events
type ProjectEvent struct {
Title interface{} `json:"title"`
ProjectID int `json:"project_id"`
@@ -310,7 +330,7 @@ func (s ProjectEvent) String() string {
// GetProjectEventsOptions represents the available GetProjectEvents() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#get-project-events
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#get-project-events
type GetProjectEventsOptions struct {
ListOptions
}
@@ -319,7 +339,7 @@ type GetProjectEventsOptions struct {
// newest to oldest.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#get-project-events
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#get-project-events
func (s *ProjectsService) GetProjectEvents(pid interface{}, opt *GetProjectEventsOptions, options ...OptionFunc) ([]*ProjectEvent, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -343,7 +363,8 @@ func (s *ProjectsService) GetProjectEvents(pid interface{}, opt *GetProjectEvent
// CreateProjectOptions represents the available CreateProjects() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#create-project
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#create-project
type CreateProjectOptions struct {
Name *string `url:"name,omitempty" json:"name,omitempty"`
Path *string `url:"path,omitempty" json:"path,omitempty"`
@@ -367,7 +388,8 @@ type CreateProjectOptions struct {
// CreateProject creates a new project owned by the authenticated user.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#create-project
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#create-project
func (s *ProjectsService) CreateProject(opt *CreateProjectOptions, options ...OptionFunc) (*Project, *Response, error) {
req, err := s.client.NewRequest("POST", "projects", opt, options)
if err != nil {
@@ -387,7 +409,7 @@ func (s *ProjectsService) CreateProject(opt *CreateProjectOptions, options ...Op
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#create-project-for-user
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#create-project-for-user
type CreateProjectForUserOptions struct {
Name *string `url:"name,omitempty" json:"name,omitempty"`
Description *string `url:"description,omitempty" json:"description,omitempty"`
@@ -405,7 +427,7 @@ type CreateProjectForUserOptions struct {
// Available only for admins.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#create-project-for-user
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#create-project-for-user
func (s *ProjectsService) CreateProjectForUser(user int, opt *CreateProjectForUserOptions, options ...OptionFunc) (*Project, *Response, error) {
u := fmt.Sprintf("projects/user/%d", user)
@@ -425,7 +447,8 @@ func (s *ProjectsService) CreateProjectForUser(user int, opt *CreateProjectForUs
// EditProjectOptions represents the available EditProject() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#edit-project
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#edit-project
type EditProjectOptions struct {
Name *string `url:"name,omitempty" json:"name,omitempty"`
Path *string `url:"path,omitempty" json:"path,omitempty"`
@@ -451,7 +474,8 @@ type EditProjectOptions struct {
// EditProject updates an existing project.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#edit-project
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#edit-project
func (s *ProjectsService) EditProject(pid interface{}, opt *EditProjectOptions, options ...OptionFunc) (*Project, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -476,7 +500,8 @@ func (s *ProjectsService) EditProject(pid interface{}, opt *EditProjectOptions,
// ForkProject forks a project into the user namespace of the authenticated
// user.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#fork-project
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#fork-project
func (s *ProjectsService) ForkProject(pid interface{}, options ...OptionFunc) (*Project, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -501,7 +526,8 @@ func (s *ProjectsService) ForkProject(pid interface{}, options ...OptionFunc) (*
// DeleteProject removes a project including all associated resources
// (issues, merge requests, etc.).
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#remove-project
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#remove-project
func (s *ProjectsService) DeleteProject(pid interface{}, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -520,7 +546,7 @@ func (s *ProjectsService) DeleteProject(pid interface{}, options ...OptionFunc)
// ProjectMember represents a project member.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#list-project-team-members
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-project-team-members
type ProjectMember struct {
ID int `json:"id"`
Username string `json:"username"`
@@ -535,7 +561,7 @@ type ProjectMember struct {
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#list-project-team-members
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-project-team-members
type ListProjectMembersOptions struct {
ListOptions
Query *string `url:"query,omitempty" json:"query,omitempty"`
@@ -544,7 +570,7 @@ type ListProjectMembersOptions struct {
// ListProjectMembers gets a list of a project's team members.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#list-project-team-members
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-project-team-members
func (s *ProjectsService) ListProjectMembers(pid interface{}, opt *ListProjectMembersOptions, options ...OptionFunc) ([]*ProjectMember, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -569,7 +595,7 @@ func (s *ProjectsService) ListProjectMembers(pid interface{}, opt *ListProjectMe
// GetProjectMember gets a project team member.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#get-project-team-member
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#get-project-team-member
func (s *ProjectsService) GetProjectMember(pid interface{}, user int, options ...OptionFunc) (*ProjectMember, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -594,7 +620,7 @@ func (s *ProjectsService) GetProjectMember(pid interface{}, user int, options ..
// AddProjectMemberOptions represents the available AddProjectMember() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#add-project-team-member
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#add-project-team-member
type AddProjectMemberOptions struct {
UserID *int `url:"user_id,omitempty" json:"user_id,omitempty"`
AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
@@ -606,7 +632,7 @@ type AddProjectMemberOptions struct {
// existing membership.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#add-project-team-member
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#add-project-team-member
func (s *ProjectsService) AddProjectMember(pid interface{}, opt *AddProjectMemberOptions, options ...OptionFunc) (*ProjectMember, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -631,7 +657,7 @@ func (s *ProjectsService) AddProjectMember(pid interface{}, opt *AddProjectMembe
// EditProjectMemberOptions represents the available EditProjectMember() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#edit-project-team-member
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#edit-project-team-member
type EditProjectMemberOptions struct {
AccessLevel *AccessLevelValue `url:"access_level,omitempty" json:"access_level,omitempty"`
}
@@ -639,7 +665,7 @@ type EditProjectMemberOptions struct {
// EditProjectMember updates a project team member to a specified access level.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#edit-project-team-member
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#edit-project-team-member
func (s *ProjectsService) EditProjectMember(pid interface{}, user int, opt *EditProjectMemberOptions, options ...OptionFunc) (*ProjectMember, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -664,7 +690,7 @@ func (s *ProjectsService) EditProjectMember(pid interface{}, user int, opt *Edit
// DeleteProjectMember removes a user from a project team.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#remove-project-team-member
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#remove-project-team-member
func (s *ProjectsService) DeleteProjectMember(pid interface{}, user int, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -683,7 +709,7 @@ func (s *ProjectsService) DeleteProjectMember(pid interface{}, user int, options
// ProjectHook represents a project hook.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#list-project-hooks
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-project-hooks
type ProjectHook struct {
ID int `json:"id"`
URL string `json:"url"`
@@ -702,7 +728,8 @@ type ProjectHook struct {
// ListProjectHooksOptions represents the available ListProjectHooks() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#list-project-hooks
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-project-hooks
type ListProjectHooksOptions struct {
ListOptions
}
@@ -710,7 +737,7 @@ type ListProjectHooksOptions struct {
// ListProjectHooks gets a list of project hooks.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#list-project-hooks
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#list-project-hooks
func (s *ProjectsService) ListProjectHooks(pid interface{}, opt *ListProjectHooksOptions, options ...OptionFunc) ([]*ProjectHook, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -735,7 +762,7 @@ func (s *ProjectsService) ListProjectHooks(pid interface{}, opt *ListProjectHook
// GetProjectHook gets a specific hook for a project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#get-project-hook
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#get-project-hook
func (s *ProjectsService) GetProjectHook(pid interface{}, hook int, options ...OptionFunc) (*ProjectHook, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -760,7 +787,7 @@ func (s *ProjectsService) GetProjectHook(pid interface{}, hook int, options ...O
// AddProjectHookOptions represents the available AddProjectHook() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#add-project-hook
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#add-project-hook
type AddProjectHookOptions struct {
URL *string `url:"url,omitempty" json:"url,omitempty"`
PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
@@ -778,7 +805,7 @@ type AddProjectHookOptions struct {
// AddProjectHook adds a hook to a specified project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#add-project-hook
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#add-project-hook
func (s *ProjectsService) AddProjectHook(pid interface{}, opt *AddProjectHookOptions, options ...OptionFunc) (*ProjectHook, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -803,7 +830,7 @@ func (s *ProjectsService) AddProjectHook(pid interface{}, opt *AddProjectHookOpt
// EditProjectHookOptions represents the available EditProjectHook() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#edit-project-hook
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#edit-project-hook
type EditProjectHookOptions struct {
URL *string `url:"url,omitempty" json:"url,omitempty"`
PushEvents *bool `url:"push_events,omitempty" json:"push_events,omitempty"`
@@ -821,7 +848,7 @@ type EditProjectHookOptions struct {
// EditProjectHook edits a hook for a specified project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#edit-project-hook
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#edit-project-hook
func (s *ProjectsService) EditProjectHook(pid interface{}, hook int, opt *EditProjectHookOptions, options ...OptionFunc) (*ProjectHook, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -847,7 +874,7 @@ func (s *ProjectsService) EditProjectHook(pid interface{}, hook int, opt *EditPr
// method and can be called multiple times. Either the hook is available or not.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#delete-project-hook
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#delete-project-hook
func (s *ProjectsService) DeleteProjectHook(pid interface{}, hook int, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -863,10 +890,124 @@ func (s *ProjectsService) DeleteProjectHook(pid interface{}, hook int, options .
return s.client.Do(req, nil)
}
+// BuildTrigger represents a project build trigger.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_triggers.md#build-triggers
+type BuildTrigger struct {
+ CreatedAt *time.Time `json:"created_at"`
+ DeletedAt *time.Time `json:"deleted_at"`
+ LastUsed *time.Time `json:"last_used"`
+ Token string `json:"token"`
+ UpdatedAt *time.Time `json:"updated_at"`
+}
+
+// ListBuildTriggersOptions represents the available ListBuildTriggers() options.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_triggers.md#list-project-triggers
+type ListBuildTriggersOptions struct {
+ ListOptions
+}
+
+// ListBuildTriggers gets a list of project triggers.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_triggers.md#list-project-triggers
+func (s *ProjectsService) ListBuildTriggers(pid interface{}, opt *ListBuildTriggersOptions, options ...OptionFunc) ([]*BuildTrigger, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/triggers", url.QueryEscape(project))
+
+ req, err := s.client.NewRequest("GET", u, opt, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ var bt []*BuildTrigger
+ resp, err := s.client.Do(req, &bt)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return bt, resp, err
+}
+
+// GetBuildTrigger gets a specific build trigger for a project.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_triggers.md#get-trigger-details
+func (s *ProjectsService) GetBuildTrigger(pid interface{}, token string, options ...OptionFunc) (*BuildTrigger, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/triggers/%v", url.QueryEscape(project), token)
+
+ req, err := s.client.NewRequest("GET", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ bt := new(BuildTrigger)
+ resp, err := s.client.Do(req, bt)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return bt, resp, err
+}
+
+// AddBuildTrigger adds a build trigger to a specified project.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_triggers.md#create-a-project-trigger
+func (s *ProjectsService) AddBuildTrigger(pid interface{}, options ...OptionFunc) (*BuildTrigger, *Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, nil, err
+ }
+ u := fmt.Sprintf("projects/%s/triggers", url.QueryEscape(project))
+
+ req, err := s.client.NewRequest("POST", u, nil, options)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ bt := new(BuildTrigger)
+ resp, err := s.client.Do(req, bt)
+ if err != nil {
+ return nil, resp, err
+ }
+
+ return bt, resp, err
+}
+
+// DeleteBuildTrigger removes a trigger from a project.
+//
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/build_triggers.md#remove-a-project-trigger
+func (s *ProjectsService) DeleteBuildTrigger(pid interface{}, token string, options ...OptionFunc) (*Response, error) {
+ project, err := parseID(pid)
+ if err != nil {
+ return nil, err
+ }
+ u := fmt.Sprintf("projects/%s/triggers/%s", url.QueryEscape(project), token)
+
+ req, err := s.client.NewRequest("DELETE", u, nil, options)
+ if err != nil {
+ return nil, err
+ }
+
+ return s.client.Do(req, nil)
+}
+
// ProjectForkRelation represents a project fork relationship.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#admin-fork-relation
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#admin-fork-relation
type ProjectForkRelation struct {
ID int `json:"id"`
ForkedToProjectID int `json:"forked_to_project_id"`
@@ -879,7 +1020,7 @@ type ProjectForkRelation struct {
// existing projects.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#create-a-forked-fromto-relation-between-existing-projects.
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#create-a-forked-fromto-relation-between-existing-projects.
func (s *ProjectsService) CreateProjectForkRelation(pid int, fork int, options ...OptionFunc) (*ProjectForkRelation, *Response, error) {
u := fmt.Sprintf("projects/%d/fork/%d", pid, fork)
@@ -900,7 +1041,7 @@ func (s *ProjectsService) CreateProjectForkRelation(pid int, fork int, options .
// DeleteProjectForkRelation deletes an existing forked from relationship.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#delete-an-existing-forked-from-relationship
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#delete-an-existing-forked-from-relationship
func (s *ProjectsService) DeleteProjectForkRelation(pid int, options ...OptionFunc) (*Response, error) {
u := fmt.Sprintf("projects/%d/fork", pid)
@@ -916,7 +1057,7 @@ func (s *ProjectsService) DeleteProjectForkRelation(pid int, options ...OptionFu
// project owner of this project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#archive-a-project
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#archive-a-project
func (s *ProjectsService) ArchiveProject(pid interface{}, options ...OptionFunc) (*Project, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -942,7 +1083,7 @@ func (s *ProjectsService) ArchiveProject(pid interface{}, options ...OptionFunc)
// the project owner of this project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/projects.html#unarchive-a-project
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#unarchive-a-project
func (s *ProjectsService) UnarchiveProject(pid interface{}, options ...OptionFunc) (*Project, *Response, error) {
project, err := parseID(pid)
if err != nil {
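
The new BuildTrigger calls added above hang off ProjectsService just like the project hook methods. A minimal usage sketch follows; it assumes details that are not part of this diff, namely the package's NewClient(httpClient, token) constructor, the Projects field on the client, and placeholder token/project values.

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// Assumed constructor and service field; the token and project path
	// are placeholders.
	git := gitlab.NewClient(nil, "private-token")

	// Create a new build trigger for the project ...
	trigger, _, err := git.Projects.AddBuildTrigger("group/project")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created trigger with token:", trigger.Token)

	// ... then list all triggers, including the one just created.
	triggers, _, err := git.Projects.ListBuildTriggers("group/project", &gitlab.ListBuildTriggersOptions{})
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range triggers {
		fmt.Println(t.Token, t.CreatedAt)
	}
}
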
diff --git a/vendor/github.com/xanzy/go-gitlab/repositories.go b/vendor/github.com/xanzy/go-gitlab/repositories.go
index aa1052f..02a0635 100644
--- a/vendor/github.com/xanzy/go-gitlab/repositories.go
+++ b/vendor/github.com/xanzy/go-gitlab/repositories.go
@@ -25,14 +25,16 @@ import (
// RepositoriesService handles communication with the repositories related
// methods of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/repositories.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md
type RepositoriesService struct {
client *Client
}
// TreeNode represents a GitLab repository file or directory.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/repositories.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md
type TreeNode struct {
ID string `json:"id"`
Name string `json:"name"`
@@ -47,7 +49,7 @@ func (t TreeNode) String() string {
// ListTreeOptions represents the available ListTree() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repositories.html#list-repository-tree
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#list-repository-tree
type ListTreeOptions struct {
Path *string `url:"path,omitempty" json:"path,omitempty"`
RefName *string `url:"ref_name,omitempty" json:"ref_name,omitempty"`
@@ -56,7 +58,7 @@ type ListTreeOptions struct {
// ListTree gets a list of repository files and directories in a project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repositories.html#list-repository-tree
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#list-repository-tree
func (s *RepositoriesService) ListTree(pid interface{}, opt *ListTreeOptions, options ...OptionFunc) ([]*TreeNode, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -81,7 +83,7 @@ func (s *RepositoriesService) ListTree(pid interface{}, opt *ListTreeOptions, op
// RawFileContentOptions represents the available RawFileContent() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repositories.html#raw-file-content
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#raw-file-content
type RawFileContentOptions struct {
FilePath *string `url:"filepath,omitempty" json:"filepath,omitempty"`
}
@@ -89,7 +91,7 @@ type RawFileContentOptions struct {
// RawFileContent gets the raw file contents for a file by commit SHA and path
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repositories.html#raw-file-content
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#raw-file-content
func (s *RepositoriesService) RawFileContent(pid interface{}, sha string, opt *RawFileContentOptions, options ...OptionFunc) ([]byte, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -114,7 +116,7 @@ func (s *RepositoriesService) RawFileContent(pid interface{}, sha string, opt *R
// RawBlobContent gets the raw file contents for a blob by blob SHA.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repositories.html#raw-blob-content
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#raw-blob-content
func (s *RepositoriesService) RawBlobContent(pid interface{}, sha string, options ...OptionFunc) ([]byte, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -139,7 +141,7 @@ func (s *RepositoriesService) RawBlobContent(pid interface{}, sha string, option
// ArchiveOptions represents the available Archive() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repositories.html#get-file-archive
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#get-file-archive
type ArchiveOptions struct {
SHA *string `url:"sha,omitempty" json:"sha,omitempty"`
}
@@ -147,7 +149,7 @@ type ArchiveOptions struct {
// Archive gets an archive of the repository.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repositories.html#get-file-archive
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#get-file-archive
func (s *RepositoriesService) Archive(pid interface{}, opt *ArchiveOptions, options ...OptionFunc) ([]byte, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -172,7 +174,7 @@ func (s *RepositoriesService) Archive(pid interface{}, opt *ArchiveOptions, opti
// Compare represents the result of a comparison of branches, tags or commits.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repositories.html#compare-branches-tags-or-commits
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#compare-branches-tags-or-commits
type Compare struct {
Commit *Commit `json:"commit"`
Commits []*Commit `json:"commits"`
@@ -188,7 +190,7 @@ func (c Compare) String() string {
// CompareOptions represents the available Compare() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repositories.html#compare-branches-tags-or-commits
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#compare-branches-tags-or-commits
type CompareOptions struct {
From *string `url:"from,omitempty" json:"from,omitempty"`
To *string `url:"to,omitempty" json:"to,omitempty"`
@@ -197,7 +199,7 @@ type CompareOptions struct {
// Compare compares branches, tags or commits.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repositories.html#compare-branches-tags-or-commits
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#compare-branches-tags-or-commits
func (s *RepositoriesService) Compare(pid interface{}, opt *CompareOptions, options ...OptionFunc) (*Compare, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -221,7 +223,8 @@ func (s *RepositoriesService) Compare(pid interface{}, opt *CompareOptions, opti
// Contributor represents a GitLab contributor.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/repositories.html#contributer
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#contributer
type Contributor struct {
Name string `json:"name,omitempty"`
Email string `json:"email,omitempty"`
@@ -236,7 +239,8 @@ func (c Contributor) String() string {
// Contributors gets the repository contributors list.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/repositories.html#contributer
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repositories.md#contributer
func (s *RepositoriesService) Contributors(pid interface{}, options ...OptionFunc) ([]*Contributor, *Response, error) {
project, err := parseID(pid)
if err != nil {
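
Like the other services, the repository calls take either a numeric project ID or a "namespace/name" string for pid. A short sketch of ListTree and Compare, under the same assumptions as the trigger example above (the NewClient constructor, the Repositories field, and the String pointer helper are not shown in this diff):

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "private-token") // placeholders

	// List files and directories under a path on a given ref.
	nodes, _, err := git.Repositories.ListTree("group/project", &gitlab.ListTreeOptions{
		Path:    gitlab.String("docs"),
		RefName: gitlab.String("master"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range nodes {
		fmt.Println(n.Name)
	}

	// Compare two branches.
	cmp, _, err := git.Repositories.Compare("group/project", &gitlab.CompareOptions{
		From: gitlab.String("master"),
		To:   gitlab.String("feature"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d commits between the two refs\n", len(cmp.Commits))
}
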
diff --git a/vendor/github.com/xanzy/go-gitlab/repository_files.go b/vendor/github.com/xanzy/go-gitlab/repository_files.go
index 21e5f65..5c0bc4b 100644
--- a/vendor/github.com/xanzy/go-gitlab/repository_files.go
+++ b/vendor/github.com/xanzy/go-gitlab/repository_files.go
@@ -24,14 +24,16 @@ import (
// RepositoryFilesService handles communication with the repository files
// related methods of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/repository_files.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md
type RepositoryFilesService struct {
client *Client
}
// File represents a GitLab repository file.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/repository_files.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md
type File struct {
FileName string `json:"file_name"`
FilePath string `json:"file_path"`
@@ -50,7 +52,7 @@ func (r File) String() string {
// GetFileOptions represents the available GetFile() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repository_files.html#get-file-from-respository
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#get-file-from-respository
type GetFileOptions struct {
FilePath *string `url:"file_path,omitempty" json:"file_path,omitempty"`
Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
@@ -60,7 +62,7 @@ type GetFileOptions struct {
// name, size, content. Note that file content is Base64 encoded.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repository_files.html#get-file-from-respository
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#get-file-from-respository
func (s *RepositoryFilesService) GetFile(pid interface{}, opt *GetFileOptions, options ...OptionFunc) (*File, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -84,7 +86,8 @@ func (s *RepositoryFilesService) GetFile(pid interface{}, opt *GetFileOptions, o
// FileInfo represents file details of a GitLab repository file.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/repository_files.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md
type FileInfo struct {
FilePath string `json:"file_path"`
BranchName string `json:"branch_name"`
@@ -97,7 +100,7 @@ func (r FileInfo) String() string {
// CreateFileOptions represents the available CreateFile() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repository_files.html#create-new-file-in-repository
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#create-new-file-in-repository
type CreateFileOptions struct {
FilePath *string `url:"file_path,omitempty" json:"file_path,omitempty"`
BranchName *string `url:"branch_name,omitempty" json:"branch_name,omitempty"`
@@ -111,7 +114,7 @@ type CreateFileOptions struct {
// CreateFile creates a new file in a repository.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repository_files.html#create-new-file-in-repository
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#create-new-file-in-repository
func (s *RepositoryFilesService) CreateFile(pid interface{}, opt *CreateFileOptions, options ...OptionFunc) (*FileInfo, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -136,7 +139,7 @@ func (s *RepositoryFilesService) CreateFile(pid interface{}, opt *CreateFileOpti
// UpdateFileOptions represents the available UpdateFile() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repository_files.html#update-existing-file-in-repository
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#update-existing-file-in-repository
type UpdateFileOptions struct {
FilePath *string `url:"file_path,omitempty" json:"file_path,omitempty"`
BranchName *string `url:"branch_name,omitempty" json:"branch_name,omitempty"`
@@ -150,7 +153,7 @@ type UpdateFileOptions struct {
// UpdateFile updates an existing file in a repository
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repository_files.html#update-existing-file-in-repository
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#update-existing-file-in-repository
func (s *RepositoryFilesService) UpdateFile(pid interface{}, opt *UpdateFileOptions, options ...OptionFunc) (*FileInfo, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -175,7 +178,7 @@ func (s *RepositoryFilesService) UpdateFile(pid interface{}, opt *UpdateFileOpti
// DeleteFileOptions represents the available DeleteFile() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repository_files.html#delete-existing-file-in-repository
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#delete-existing-file-in-repository
type DeleteFileOptions struct {
FilePath *string `url:"file_path,omitempty" json:"file_path,omitempty"`
BranchName *string `url:"branch_name,omitempty" json:"branch_name,omitempty"`
@@ -187,7 +190,7 @@ type DeleteFileOptions struct {
// DeleteFile deletes an existing file in a repository
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/repository_files.html#delete-existing-file-in-repository
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/repository_files.md#delete-existing-file-in-repository
func (s *RepositoryFilesService) DeleteFile(pid interface{}, opt *DeleteFileOptions, options ...OptionFunc) (*FileInfo, *Response, error) {
project, err := parseID(pid)
if err != nil {
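
A sketch of fetching a single file with GetFile. The base64 decoding follows from the doc comment above, but the Content field name itself is assumed, as are the constructor, the RepositoryFiles field, and the String helper:

package main

import (
	"encoding/base64"
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "private-token") // placeholders

	f, _, err := git.RepositoryFiles.GetFile("group/project", &gitlab.GetFileOptions{
		FilePath: gitlab.String("README.md"),
		Ref:      gitlab.String("master"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// Per the doc comment, file content comes back base64 encoded;
	// the Content field name is assumed here.
	raw, err := base64.StdEncoding.DecodeString(f.Content)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s (%d bytes)\n", f.FileName, len(raw))
}
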
diff --git a/vendor/github.com/xanzy/go-gitlab/services.go b/vendor/github.com/xanzy/go-gitlab/services.go
index dbcdf84..7dd094d 100644
--- a/vendor/github.com/xanzy/go-gitlab/services.go
+++ b/vendor/github.com/xanzy/go-gitlab/services.go
@@ -25,14 +25,16 @@ import (
// ServicesService handles communication with the services related methods of
// the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/services.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md
type ServicesService struct {
client *Client
}
// Service represents a GitLab service.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/services.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md
type Service struct {
ID *int `json:"id"`
Title *string `json:"title"`
@@ -50,7 +52,7 @@ type Service struct {
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/services.html#edit-gitlab-ci-service
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#edit-gitlab-ci-service
type SetGitLabCIServiceOptions struct {
Token *string `url:"token,omitempty" json:"token,omitempty"`
ProjectURL *string `url:"project_url,omitempty" json:"project_url,omitempty"`
@@ -59,7 +61,7 @@ type SetGitLabCIServiceOptions struct {
// SetGitLabCIService sets GitLab CI service for a project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/services.html#edit-gitlab-ci-service
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#edit-gitlab-ci-service
func (s *ServicesService) SetGitLabCIService(pid interface{}, opt *SetGitLabCIServiceOptions, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -78,7 +80,7 @@ func (s *ServicesService) SetGitLabCIService(pid interface{}, opt *SetGitLabCISe
// DeleteGitLabCIService deletes GitLab CI service settings for a project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/services.html#delete-gitlab-ci-service
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#delete-gitlab-ci-service
func (s *ServicesService) DeleteGitLabCIService(pid interface{}, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -98,7 +100,7 @@ func (s *ServicesService) DeleteGitLabCIService(pid interface{}, options ...Opti
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/services.html#edit-hipchat-service
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#edit-hipchat-service
type SetHipChatServiceOptions struct {
Token *string `url:"token,omitempty" json:"token,omitempty" `
Room *string `url:"room,omitempty" json:"room,omitempty"`
@@ -107,7 +109,7 @@ type SetHipChatServiceOptions struct {
// SetHipChatService sets HipChat service for a project
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/services.html#edit-hipchat-service
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#edit-hipchat-service
func (s *ServicesService) SetHipChatService(pid interface{}, opt *SetHipChatServiceOptions, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -126,7 +128,7 @@ func (s *ServicesService) SetHipChatService(pid interface{}, opt *SetHipChatServ
// DeleteHipChatService deletes HipChat service for project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/services.html#delete-hipchat-service
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#delete-hipchat-service
func (s *ServicesService) DeleteHipChatService(pid interface{}, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -146,7 +148,7 @@ func (s *ServicesService) DeleteHipChatService(pid interface{}, options ...Optio
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/services.html#createedit-drone-ci-service
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#createedit-drone-ci-service
type SetDroneCIServiceOptions struct {
Token *string `url:"token" json:"token" `
DroneURL *string `url:"drone_url" json:"drone_url"`
@@ -156,7 +158,7 @@ type SetDroneCIServiceOptions struct {
// SetDroneCIService sets Drone CI service for a project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/services.html#createedit-drone-ci-service
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#createedit-drone-ci-service
func (s *ServicesService) SetDroneCIService(pid interface{}, opt *SetDroneCIServiceOptions, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -175,7 +177,7 @@ func (s *ServicesService) SetDroneCIService(pid interface{}, opt *SetDroneCIServ
// DeleteDroneCIService deletes Drone CI service settings for a project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/services.html#delete-drone-ci-service
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#delete-drone-ci-service
func (s *ServicesService) DeleteDroneCIService(pid interface{}, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -207,7 +209,7 @@ type DroneCIService struct {
// GetDroneCIService gets Drone CI service settings for a project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/services.html#get-drone-ci-service-settings
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#get-drone-ci-service-settings
func (s *ServicesService) GetDroneCIService(pid interface{}, options ...OptionFunc) (*DroneCIService, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -233,7 +235,7 @@ func (s *ServicesService) GetDroneCIService(pid interface{}, options ...OptionFu
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/services.html#edit-slack-service
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#edit-slack-service
type SetSlackServiceOptions struct {
WebHook *string `url:"webhook,omitempty" json:"webhook,omitempty" `
Username *string `url:"username,omitempty" json:"username,omitempty" `
@@ -243,7 +245,7 @@ type SetSlackServiceOptions struct {
// SetSlackService sets Slack service for a project
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/services.html#edit-slack-service
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#edit-slack-service
func (s *ServicesService) SetSlackService(pid interface{}, opt *SetSlackServiceOptions, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -262,7 +264,7 @@ func (s *ServicesService) SetSlackService(pid interface{}, opt *SetSlackServiceO
// DeleteSlackService deletes Slack service for project.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/services.html#delete-slack-service
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/services.md#delete-slack-service
func (s *ServicesService) DeleteSlackService(pid interface{}, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
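
The service setters only return a *Response, so enabling and removing an integration is two short calls. A sketch for the Slack service, with the same assumed client setup and String helper as in the earlier examples (webhook URL and project are placeholders):

package main

import (
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "private-token") // placeholders

	// Enable the Slack service for a project ...
	_, err := git.Services.SetSlackService("group/project", &gitlab.SetSlackServiceOptions{
		WebHook:  gitlab.String("https://hooks.slack.com/services/XXX"),
		Username: gitlab.String("gitlab"),
	})
	if err != nil {
		log.Fatal(err)
	}

	// ... and later remove it again.
	if _, err := git.Services.DeleteSlackService("group/project"); err != nil {
		log.Fatal(err)
	}
}
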
diff --git a/vendor/github.com/xanzy/go-gitlab/session.go b/vendor/github.com/xanzy/go-gitlab/session.go
index 571483c..a5083c2 100644
--- a/vendor/github.com/xanzy/go-gitlab/session.go
+++ b/vendor/github.com/xanzy/go-gitlab/session.go
@@ -21,14 +21,16 @@ import "time"
// SessionService handles communication with the session related methods of
// the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/session.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/session.md
type SessionService struct {
client *Client
}
// Session represents a GitLab session.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/session.html#session
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/session.md#session
type Session struct {
ID int `json:"id"`
Username string `json:"username"`
@@ -52,7 +54,8 @@ type Session struct {
// GetSessionOptions represents the available Session() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/session.html#session
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/session.md#session
type GetSessionOptions struct {
Login *string `url:"login,omitempty" json:"login,omitempty"`
Email *string `url:"email,omitempty" json:"email,omitempty"`
@@ -61,7 +64,8 @@ type GetSessionOptions struct {
// GetSession logs in to get private token.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/session.html#session
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/session.md#session
func (s *SessionService) GetSession(opt *GetSessionOptions, options ...OptionFunc) (*Session, *Response, error) {
req, err := s.client.NewRequest("POST", "session", opt, options)
if err != nil {
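
GetSession is the one call that works without a token, since it is how a private token is obtained in the first place. A sketch under the same assumed constructor; the credentials are placeholders and the Password option field is assumed from the session API:

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	// An unauthenticated client is enough to log in.
	git := gitlab.NewClient(nil, "")

	session, _, err := git.Session.GetSession(&gitlab.GetSessionOptions{
		Login:    gitlab.String("jdoe"),   // placeholder credentials
		Password: gitlab.String("secret"), // Password field assumed
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("logged in as", session.Username)
}
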
diff --git a/vendor/github.com/xanzy/go-gitlab/settings.go b/vendor/github.com/xanzy/go-gitlab/settings.go
index 41e9c9d..a5c2d61 100644
--- a/vendor/github.com/xanzy/go-gitlab/settings.go
+++ b/vendor/github.com/xanzy/go-gitlab/settings.go
@@ -21,14 +21,16 @@ import "time"
// SettingsService handles communication with the application SettingsService
// related methods of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/settings.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/settings.md
type SettingsService struct {
client *Client
}
// Settings represents the GitLab application settings.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/settings.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/settings.md
type Settings struct {
ID int `json:"id"`
DefaultProjectsLimit int `json:"default_projects_limit"`
@@ -58,7 +60,7 @@ func (s Settings) String() string {
// GetSettings gets the current application settings.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/settings.html#get-current-application.settings
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/settings.md#get-current-application.settings
func (s *SettingsService) GetSettings(options ...OptionFunc) (*Settings, *Response, error) {
req, err := s.client.NewRequest("GET", "application/settings", nil, options)
if err != nil {
@@ -77,7 +79,7 @@ func (s *SettingsService) GetSettings(options ...OptionFunc) (*Settings, *Respon
// UpdateSettingsOptions represents the available UpdateSettings() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/settings.html#change-application.settings
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/settings.md#change-application.settings
type UpdateSettingsOptions struct {
DefaultProjectsLimit *int `url:"default_projects_limit,omitempty" json:"default_projects_limit,omitempty"`
SignupEnabled *bool `url:"signup_enabled,omitempty" json:"signup_enabled,omitempty"`
@@ -100,7 +102,7 @@ type UpdateSettingsOptions struct {
// UpdateSettings updates the application settings.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/settings.html#change-application.settings
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/settings.md#change-application.settings
func (s *SettingsService) UpdateSettings(opt *UpdateSettingsOptions, options ...OptionFunc) (*Settings, *Response, error) {
req, err := s.client.NewRequest("PUT", "application/settings", opt, options)
if err != nil {
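
Reading and updating the application settings is admin-only. A sketch with the same assumed client setup; Bool is the assumed pointer helper and the token is a placeholder:

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "admin-token") // placeholders

	settings, _, err := git.Settings.GetSettings()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("default projects limit:", settings.DefaultProjectsLimit)

	// Disable sign-ups instance-wide.
	if _, _, err := git.Settings.UpdateSettings(&gitlab.UpdateSettingsOptions{
		SignupEnabled: gitlab.Bool(false),
	}); err != nil {
		log.Fatal(err)
	}
}
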
diff --git a/vendor/github.com/xanzy/go-gitlab/system_hooks.go b/vendor/github.com/xanzy/go-gitlab/system_hooks.go
index 20277a9..d2d3f5b 100644
--- a/vendor/github.com/xanzy/go-gitlab/system_hooks.go
+++ b/vendor/github.com/xanzy/go-gitlab/system_hooks.go
@@ -24,14 +24,16 @@ import (
// SystemHooksService handles communication with the system hooks related
// methods of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/system_hooks.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md
type SystemHooksService struct {
client *Client
}
// Hook represents a GitLab system hook.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/system_hooks.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md
type Hook struct {
ID int `json:"id"`
URL string `json:"url"`
@@ -45,7 +47,7 @@ func (h Hook) String() string {
// ListHooks gets a list of system hooks.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/system_hooks.html#list-system-hooks
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md#list-system-hooks
func (s *SystemHooksService) ListHooks(options ...OptionFunc) ([]*Hook, *Response, error) {
req, err := s.client.NewRequest("GET", "hooks", nil, options)
if err != nil {
@@ -64,7 +66,7 @@ func (s *SystemHooksService) ListHooks(options ...OptionFunc) ([]*Hook, *Respons
// AddHookOptions represents the available AddHook() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/system_hooks.html#add-new-system-hook-hook
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md#add-new-system-hook-hook
type AddHookOptions struct {
URL *string `url:"url,omitempty" json:"url,omitempty"`
}
@@ -72,7 +74,7 @@ type AddHookOptions struct {
// AddHook adds a new system hook hook.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/system_hooks.html#add-new-system-hook-hook
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md#add-new-system-hook-hook
func (s *SystemHooksService) AddHook(opt *AddHookOptions, options ...OptionFunc) (*Hook, *Response, error) {
req, err := s.client.NewRequest("POST", "hooks", opt, options)
if err != nil {
@@ -90,7 +92,8 @@ func (s *SystemHooksService) AddHook(opt *AddHookOptions, options ...OptionFunc)
// HookEvent represents an event triggered by a GitLab system hook.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/system_hooks.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md
type HookEvent struct {
EventName string `json:"event_name"`
Name string `json:"name"`
@@ -107,7 +110,7 @@ func (h HookEvent) String() string {
// TestHook tests a system hook.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/system_hooks.html#test-system-hook
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md#test-system-hook
func (s *SystemHooksService) TestHook(hook int, options ...OptionFunc) (*HookEvent, *Response, error) {
u := fmt.Sprintf("hooks/%d", hook)
@@ -130,7 +133,7 @@ func (s *SystemHooksService) TestHook(hook int, options ...OptionFunc) (*HookEve
// is also returned as JSON.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/system_hooks.html#delete-system-hook
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/system_hooks.md#delete-system-hook
func (s *SystemHooksService) DeleteHook(hook int, options ...OptionFunc) (*Response, error) {
u := fmt.Sprintf("hooks/%d", hook)
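
System hooks are likewise admin-only and are identified by the integer ID returned from AddHook. A sketch of the add/test/delete round trip, with the same assumed client setup and a placeholder hook URL:

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "admin-token") // placeholders

	// Register a system hook, fire a test event against it, then remove it.
	hook, _, err := git.SystemHooks.AddHook(&gitlab.AddHookOptions{
		URL: gitlab.String("https://example.com/gitlab-hook"),
	})
	if err != nil {
		log.Fatal(err)
	}

	event, _, err := git.SystemHooks.TestHook(hook.ID)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("test event:", event.EventName)

	if _, err := git.SystemHooks.DeleteHook(hook.ID); err != nil {
		log.Fatal(err)
	}
}
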
diff --git a/vendor/github.com/xanzy/go-gitlab/tags.go b/vendor/github.com/xanzy/go-gitlab/tags.go
index 196f6d4..10bfdf5 100644
--- a/vendor/github.com/xanzy/go-gitlab/tags.go
+++ b/vendor/github.com/xanzy/go-gitlab/tags.go
@@ -24,14 +24,16 @@ import (
// TagsService handles communication with the tags related methods
// of the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/tags.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/tags.md
type TagsService struct {
client *Client
}
// Tag represents a GitLab tag.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/tags.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/tags.md
type Tag struct {
Commit *Commit `json:"commit"`
Name string `json:"name"`
@@ -46,7 +48,7 @@ func (r Tag) String() string {
// alphabetical order.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/tags.html#list-project-repository-tags
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/tags.md#list-project-repository-tags
func (s *TagsService) ListTags(pid interface{}, options ...OptionFunc) ([]*Tag, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -72,7 +74,7 @@ func (s *TagsService) ListTags(pid interface{}, options ...OptionFunc) ([]*Tag,
// with the tag information if the tag exists. It returns 404 if the tag does not exist.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/tags.html#get-a-single-repository-tag
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/tags.md#get-a-single-repository-tag
func (s *TagsService) GetTag(pid interface{}, tag string, options ...OptionFunc) (*Tag, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -97,7 +99,7 @@ func (s *TagsService) GetTag(pid interface{}, tag string, options ...OptionFunc)
// CreateTagOptions represents the available CreateTag() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/tags.html#create-a-new-tag
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/tags.md#create-a-new-tag
type CreateTagOptions struct {
TagName *string `url:"tag_name,omitempty" json:"tag_name,omitempty"`
Ref *string `url:"ref,omitempty" json:"ref,omitempty"`
@@ -107,7 +109,7 @@ type CreateTagOptions struct {
// CreateTag creates a new tag in the repository that points to the supplied ref.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/tags.html#create-a-new-tag
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/tags.md#create-a-new-tag
func (s *TagsService) CreateTag(pid interface{}, opt *CreateTagOptions, options ...OptionFunc) (*Tag, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -132,7 +134,7 @@ func (s *TagsService) CreateTag(pid interface{}, opt *CreateTagOptions, options
// DeleteTag deletes a tag of a repository with given name.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/tags.html#delete-a-tag
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/tags.md#delete-a-tag
func (s *TagsService) DeleteTag(pid interface{}, tag string, options ...OptionFunc) (*Response, error) {
project, err := parseID(pid)
if err != nil {
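
Creating a tag only needs a name and a ref. A sketch of CreateTag followed by ListTags, under the same assumed client setup (tag name, ref and project are placeholders):

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "private-token") // placeholders

	// Tag the current tip of master ...
	tag, _, err := git.Tags.CreateTag("group/project", &gitlab.CreateTagOptions{
		TagName: gitlab.String("v1.0.0"),
		Ref:     gitlab.String("master"),
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created tag", tag.Name)

	// ... and list all tags of the project.
	tags, _, err := git.Tags.ListTags("group/project")
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range tags {
		fmt.Println(t.Name)
	}
}
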
diff --git a/vendor/github.com/xanzy/go-gitlab/time_stats.go b/vendor/github.com/xanzy/go-gitlab/time_stats.go
index c4a4ad4..0e8fa55 100644
--- a/vendor/github.com/xanzy/go-gitlab/time_stats.go
+++ b/vendor/github.com/xanzy/go-gitlab/time_stats.go
@@ -8,15 +8,17 @@ import (
// TimeStatsService handles communication with the time tracking related
// methods of the GitLab API.
//
-// GitLab docs: https://docs.gitlab.com/ce/workflow/time_tracking.html
-// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html
+// GitLab docs: https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/workflow/time_tracking.md
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md
type TimeStatsService struct {
client *Client
}
// TimeStats represents the time estimates and time spent for an issue.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/issues.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md
type TimeStats struct {
HumanTimeEstimate string `json:"human_time_estimate"`
HumanTotalTimeSpent string `json:"human_total_time_spent"`
@@ -32,7 +34,7 @@ func (t TimeStats) String() string {
// options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/issues.html#set-a-time-estimate-for-an-issue
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#set-a-time-estimate-for-an-issue
type SetTimeEstimateOptions struct {
Duration *string `url:"duration,omitempty" json:"duration,omitempty"`
}
@@ -40,7 +42,7 @@ type SetTimeEstimateOptions struct {
// SetTimeEstimate sets the time estimate for a single project issue.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/issues.html#set-a-time-estimate-for-an-issue
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#set-a-time-estimate-for-an-issue
func (s *TimeStatsService) SetTimeEstimate(pid interface{}, issue int, opt *SetTimeEstimateOptions, options ...OptionFunc) (*TimeStats, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -65,7 +67,7 @@ func (s *TimeStatsService) SetTimeEstimate(pid interface{}, issue int, opt *SetT
// ResetTimeEstimate resets the time estimate for a single project issue.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/issues.html#reset-the-time-estimate-for-an-issue
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#reset-the-time-estimate-for-an-issue
func (s *TimeStatsService) ResetTimeEstimate(pid interface{}, issue int, options ...OptionFunc) (*TimeStats, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -90,7 +92,7 @@ func (s *TimeStatsService) ResetTimeEstimate(pid interface{}, issue int, options
// AddSpentTimeOptions represents the available AddSpentTime() options.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/issues.html#add-spent-time-for-an-issue
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#add-spent-time-for-an-issue
type AddSpentTimeOptions struct {
Duration *string `url:"duration,omitempty" json:"duration,omitempty"`
}
@@ -98,7 +100,7 @@ type AddSpentTimeOptions struct {
// AddSpentTime adds spent time for a single project issue.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/issues.html#add-spent-time-for-an-issue
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#add-spent-time-for-an-issue
func (s *TimeStatsService) AddSpentTime(pid interface{}, issue int, opt *AddSpentTimeOptions, options ...OptionFunc) (*TimeStats, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -123,7 +125,7 @@ func (s *TimeStatsService) AddSpentTime(pid interface{}, issue int, opt *AddSpen
// ResetSpentTime resets the spent time for a single project issue.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/issues.html#reset-spent-time-for-an-issue
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#reset-spent-time-for-an-issue
func (s *TimeStatsService) ResetSpentTime(pid interface{}, issue int, options ...OptionFunc) (*TimeStats, *Response, error) {
project, err := parseID(pid)
if err != nil {
@@ -148,7 +150,7 @@ func (s *TimeStatsService) ResetSpentTime(pid interface{}, issue int, options ..
// GetTimeSpent gets the spent time for a single project issue.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/issues.html#get-time-tracking-stats
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/issues.md#get-time-tracking-stats
func (s *TimeStatsService) GetTimeSpent(pid interface{}, issue int, options ...OptionFunc) (*TimeStats, *Response, error) {
project, err := parseID(pid)
if err != nil {
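
The time tracking calls all take the project and the issue IID plus a human-readable duration such as "3d" or "2h". A sketch with the same assumed client setup; the project and issue number 42 are placeholders:

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "private-token") // placeholders

	// Estimate three days for issue 42, log two hours against it, then
	// read the stats back.
	if _, _, err := git.TimeStats.SetTimeEstimate("group/project", 42, &gitlab.SetTimeEstimateOptions{
		Duration: gitlab.String("3d"),
	}); err != nil {
		log.Fatal(err)
	}

	if _, _, err := git.TimeStats.AddSpentTime("group/project", 42, &gitlab.AddSpentTimeOptions{
		Duration: gitlab.String("2h"),
	}); err != nil {
		log.Fatal(err)
	}

	stats, _, err := git.TimeStats.GetTimeSpent("group/project", 42)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(stats.HumanTimeEstimate, "estimated,", stats.HumanTotalTimeSpent, "spent")
}
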
diff --git a/vendor/github.com/xanzy/go-gitlab/users.go b/vendor/github.com/xanzy/go-gitlab/users.go
index ba955a8..350d593 100644
--- a/vendor/github.com/xanzy/go-gitlab/users.go
+++ b/vendor/github.com/xanzy/go-gitlab/users.go
@@ -25,14 +25,16 @@ import (
// UsersService handles communication with the user related methods of
// the GitLab API.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md
type UsersService struct {
client *Client
}
// User represents a GitLab user.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md
type User struct {
ID int `json:"id"`
Username string `json:"username"`
@@ -68,7 +70,8 @@ type UserIdentity struct {
// ListUsersOptions represents the available ListUsers() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#list-users
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#list-users
type ListUsersOptions struct {
ListOptions
Active *bool `url:"active,omitempty" json:"active,omitempty"`
@@ -78,7 +81,8 @@ type ListUsersOptions struct {
// ListUsers gets a list of users.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#list-users
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#list-users
func (s *UsersService) ListUsers(opt *ListUsersOptions, options ...OptionFunc) ([]*User, *Response, error) {
req, err := s.client.NewRequest("GET", "users", opt, options)
if err != nil {
@@ -96,7 +100,8 @@ func (s *UsersService) ListUsers(opt *ListUsersOptions, options ...OptionFunc) (
// GetUser gets a single user.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#single-user
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#single-user
func (s *UsersService) GetUser(user int, options ...OptionFunc) (*User, *Response, error) {
u := fmt.Sprintf("users/%d", user)
@@ -116,7 +121,8 @@ func (s *UsersService) GetUser(user int, options ...OptionFunc) (*User, *Respons
// CreateUserOptions represents the available CreateUser() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#user-creation
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#user-creation
type CreateUserOptions struct {
Email *string `url:"email,omitempty" json:"email,omitempty"`
Password *string `url:"password,omitempty" json:"password,omitempty"`
@@ -137,7 +143,8 @@ type CreateUserOptions struct {
// CreateUser creates a new user. Note only administrators can create new users.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#user-creation
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#user-creation
func (s *UsersService) CreateUser(opt *CreateUserOptions, options ...OptionFunc) (*User, *Response, error) {
req, err := s.client.NewRequest("POST", "users", opt, options)
if err != nil {
@@ -155,7 +162,8 @@ func (s *UsersService) CreateUser(opt *CreateUserOptions, options ...OptionFunc)
// ModifyUserOptions represents the available ModifyUser() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#user-modification
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#user-modification
type ModifyUserOptions struct {
Email *string `url:"email,omitempty" json:"email,omitempty"`
Password *string `url:"password,omitempty" json:"password,omitempty"`
@@ -176,7 +184,8 @@ type ModifyUserOptions struct {
// ModifyUser modifies an existing user. Only administrators can change attributes
// of a user.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#user-modification
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#user-modification
func (s *UsersService) ModifyUser(user int, opt *ModifyUserOptions, options ...OptionFunc) (*User, *Response, error) {
u := fmt.Sprintf("users/%d", user)
@@ -200,7 +209,8 @@ func (s *UsersService) ModifyUser(user int, opt *ModifyUserOptions, options ...O
// actually deleted or not. In the former the user is returned and in the
// latter not.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#user-deletion
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#user-deletion
func (s *UsersService) DeleteUser(user int, options ...OptionFunc) (*Response, error) {
u := fmt.Sprintf("users/%d", user)
@@ -214,7 +224,8 @@ func (s *UsersService) DeleteUser(user int, options ...OptionFunc) (*Response, e
// CurrentUser gets currently authenticated user.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#current-user
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#current-user
func (s *UsersService) CurrentUser(options ...OptionFunc) (*User, *Response, error) {
req, err := s.client.NewRequest("GET", "user", nil, options)
if err != nil {
@@ -232,7 +243,8 @@ func (s *UsersService) CurrentUser(options ...OptionFunc) (*User, *Response, err
// SSHKey represents a SSH key.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#list-ssh-keys
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#list-ssh-keys
type SSHKey struct {
ID int `json:"id"`
Title string `json:"title"`
@@ -242,7 +254,8 @@ type SSHKey struct {
// ListSSHKeys gets a list of currently authenticated user's SSH keys.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#list-ssh-keys
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#list-ssh-keys
func (s *UsersService) ListSSHKeys(options ...OptionFunc) ([]*SSHKey, *Response, error) {
req, err := s.client.NewRequest("GET", "user/keys", nil, options)
if err != nil {
@@ -262,7 +275,7 @@ func (s *UsersService) ListSSHKeys(options ...OptionFunc) ([]*SSHKey, *Response,
// only for admin
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/users.html#list-ssh-keys-for-user
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#list-ssh-keys-for-user
func (s *UsersService) ListSSHKeysForUser(user int, options ...OptionFunc) ([]*SSHKey, *Response, error) {
u := fmt.Sprintf("users/%d/keys", user)
@@ -282,7 +295,8 @@ func (s *UsersService) ListSSHKeysForUser(user int, options ...OptionFunc) ([]*S
// GetSSHKey gets a single key.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#single-ssh-key
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#single-ssh-key
func (s *UsersService) GetSSHKey(kid int, options ...OptionFunc) (*SSHKey, *Response, error) {
u := fmt.Sprintf("user/keys/%d", kid)
@@ -302,7 +316,8 @@ func (s *UsersService) GetSSHKey(kid int, options ...OptionFunc) (*SSHKey, *Resp
// AddSSHKeyOptions represents the available AddSSHKey() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#add-ssh-key
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#add-ssh-key
type AddSSHKeyOptions struct {
Title *string `url:"title,omitempty" json:"title,omitempty"`
Key *string `url:"key,omitempty" json:"key,omitempty"`
@@ -310,7 +325,8 @@ type AddSSHKeyOptions struct {
// AddSSHKey creates a new key owned by the currently authenticated user.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#add-ssh-key
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#add-ssh-key
func (s *UsersService) AddSSHKey(opt *AddSSHKeyOptions, options ...OptionFunc) (*SSHKey, *Response, error) {
req, err := s.client.NewRequest("POST", "user/keys", opt, options)
if err != nil {
@@ -329,7 +345,8 @@ func (s *UsersService) AddSSHKey(opt *AddSSHKeyOptions, options ...OptionFunc) (
// AddSSHKeyForUser creates new key owned by specified user. Available only for
// admin.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#add-ssh-key-for-user
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#add-ssh-key-for-user
func (s *UsersService) AddSSHKeyForUser(user int, opt *AddSSHKeyOptions, options ...OptionFunc) (*SSHKey, *Response, error) {
u := fmt.Sprintf("users/%d/keys", user)
@@ -352,7 +369,7 @@ func (s *UsersService) AddSSHKeyForUser(user int, opt *AddSSHKeyOptions, options
// available results in 200 OK.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/users.html#delete-ssh-key-for-current-owner
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#delete-ssh-key-for-current-owner
func (s *UsersService) DeleteSSHKey(kid int, options ...OptionFunc) (*Response, error) {
u := fmt.Sprintf("user/keys/%d", kid)
@@ -368,7 +385,7 @@ func (s *UsersService) DeleteSSHKey(kid int, options ...OptionFunc) (*Response,
// for admin.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/users.html#delete-ssh-key-for-given-user
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#delete-ssh-key-for-given-user
func (s *UsersService) DeleteSSHKeyForUser(user int, kid int, options ...OptionFunc) (*Response, error) {
u := fmt.Sprintf("users/%d/keys/%d", user, kid)
@@ -382,7 +399,8 @@ func (s *UsersService) DeleteSSHKeyForUser(user int, kid int, options ...OptionF
// BlockUser blocks the specified user. Available only for admin.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#block-user
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#block-user
func (s *UsersService) BlockUser(user int, options ...OptionFunc) error {
u := fmt.Sprintf("users/%d/block", user)
@@ -402,7 +420,7 @@ func (s *UsersService) BlockUser(user int, options ...OptionFunc) error {
case 403:
return errors.New("Cannot block a user that is already blocked by LDAP synchronization")
case 404:
- return errors.New("User does not exists")
+ return errors.New("User does not exist")
default:
return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
}
@@ -410,7 +428,8 @@ func (s *UsersService) BlockUser(user int, options ...OptionFunc) error {
// UnblockUser unblocks the specified user. Available only for admin.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#unblock-user
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#unblock-user
func (s *UsersService) UnblockUser(user int, options ...OptionFunc) error {
u := fmt.Sprintf("users/%d/unblock", user)
@@ -430,7 +449,7 @@ func (s *UsersService) UnblockUser(user int, options ...OptionFunc) error {
case 403:
return errors.New("Cannot unblock a user that is blocked by LDAP synchronization")
case 404:
- return errors.New("User does not exists")
+ return errors.New("User does not exist")
default:
return fmt.Errorf("Received unexpected result code: %d", resp.StatusCode)
}
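
ListUsers, BlockUser and UnblockUser are the calls touched by the corrected 404 message above; the latter two return only an error. A sketch with the same assumed client setup and Bool helper; the admin token and the user ID 4711 are placeholders:

package main

import (
	"fmt"
	"log"

	gitlab "github.com/xanzy/go-gitlab"
)

func main() {
	git := gitlab.NewClient(nil, "admin-token") // placeholders

	// List only active users.
	users, _, err := git.Users.ListUsers(&gitlab.ListUsersOptions{
		Active: gitlab.Bool(true),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, u := range users {
		fmt.Println(u.ID, u.Username)
	}

	// BlockUser and UnblockUser return only an error; a missing user now
	// surfaces as "User does not exist".
	if err := git.Users.BlockUser(4711); err != nil {
		log.Fatal(err)
	}
	if err := git.Users.UnblockUser(4711); err != nil {
		log.Fatal(err)
	}
}
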
@@ -446,7 +465,8 @@ type Email struct {
// ListEmails gets a list of currently authenticated user's Emails.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#list-emails
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#list-emails
func (s *UsersService) ListEmails(options ...OptionFunc) ([]*Email, *Response, error) {
req, err := s.client.NewRequest("GET", "user/emails", nil, options)
if err != nil {
@@ -466,7 +486,7 @@ func (s *UsersService) ListEmails(options ...OptionFunc) ([]*Email, *Response, e
// only for admin
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/users.html#list-emails-for-user
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#list-emails-for-user
func (s *UsersService) ListEmailsForUser(uid int, options ...OptionFunc) ([]*Email, *Response, error) {
u := fmt.Sprintf("users/%d/emails", uid)
@@ -486,7 +506,8 @@ func (s *UsersService) ListEmailsForUser(uid int, options ...OptionFunc) ([]*Ema
// GetEmail gets a single email.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#single-email
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#single-email
func (s *UsersService) GetEmail(eid int, options ...OptionFunc) (*Email, *Response, error) {
u := fmt.Sprintf("user/emails/%d", eid)
@@ -506,14 +527,16 @@ func (s *UsersService) GetEmail(eid int, options ...OptionFunc) (*Email, *Respon
// AddEmailOptions represents the available AddEmail() options.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/projects.html#add-email
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/projects.md#add-email
type AddEmailOptions struct {
Email *string `url:"email,omitempty" json:"email,omitempty"`
}
// AddEmail creates a new email owned by the currently authenticated user.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#add-email
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#add-email
func (s *UsersService) AddEmail(opt *AddEmailOptions, options ...OptionFunc) (*Email, *Response, error) {
req, err := s.client.NewRequest("POST", "user/emails", opt, options)
if err != nil {
@@ -532,7 +555,8 @@ func (s *UsersService) AddEmail(opt *AddEmailOptions, options ...OptionFunc) (*E
// AddEmailForUser creates new email owned by specified user. Available only for
// admin.
//
-// GitLab API docs: https://docs.gitlab.com/ce/api/users.html#add-email-for-user
+// GitLab API docs:
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#add-email-for-user
func (s *UsersService) AddEmailForUser(uid int, opt *AddEmailOptions, options ...OptionFunc) (*Email, *Response, error) {
u := fmt.Sprintf("users/%d/emails", uid)
@@ -555,7 +579,7 @@ func (s *UsersService) AddEmailForUser(uid int, opt *AddEmailOptions, options ..
// available results in 200 OK.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/users.html#delete-email-for-current-owner
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#delete-email-for-current-owner
func (s *UsersService) DeleteEmail(eid int, options ...OptionFunc) (*Response, error) {
u := fmt.Sprintf("user/emails/%d", eid)
@@ -571,7 +595,7 @@ func (s *UsersService) DeleteEmail(eid int, options ...OptionFunc) (*Response, e
// for admin.
//
// GitLab API docs:
-// https://docs.gitlab.com/ce/api/users.html#delete-email-for-given-user
+// https://gitlab.com/gitlab-org/gitlab-ce/blob/8-16-stable/doc/api/users.md#delete-email-for-given-user
func (s *UsersService) DeleteEmailForUser(uid int, eid int, options ...OptionFunc) (*Response, error) {
u := fmt.Sprintf("users/%d/emails/%d", uid, eid)